Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/*
Set of callbacks that the library will call for vkAllocateMemory and
vkFreeMemory. Useful for debugging or tracking device memory usage.
*/
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    /// Allocator and all objects created from it will not be synchronized internally,
    /// so you must guarantee they are used from only one thread at a time.
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /// Enables usage of the VK_KHR_dedicated_allocation extension.
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;

typedef VkFlags VmaAllocatorCreateFlags;

/*
Pointers to some Vulkan functions - a subset used by the library.
Used in VmaAllocatorCreateInfo::pVulkanFunctions.
*/
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block allocated from large heaps. Optional, 0 = default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of each Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

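/*
A minimal usage sketch (illustrative, not part of the original header): assumes
you already created a VkPhysicalDevice and VkDevice; error handling omitted.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images, allocations ...
    vmaDestroyAllocator(allocator);
*/
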
/// PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns its VkMemoryPropertyFlags.
/// This is just a convenience function; the same information can be obtained using vmaGetMemoryProperties().
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets index of the current frame. Required if you use allocations that can become lost.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on the device only, so fast access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on the host and preferably fast to access from the host.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory mappable on the host and preferably fast to access by the GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on the host and cached; preferred for resources written by the GPU and read back on the CPU.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Flags for this allocation.
    VmaAllocationCreateFlags flags;
    /// Intended usage of the memory. Can be left as VMA_MEMORY_USAGE_UNKNOWN if you specify requiredFlags explicitly.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation. 0 = no restriction.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional; leave null to allocate from the default pool.
    VmaPool pool;
    /// Custom general-purpose pointer stored in the VmaAllocation; readable as VmaAllocationInfo::pUserData.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Variant of vmaFindMemoryTypeIndex() that derives memoryTypeBits from a VkBufferCreateInfo.
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Variant of vmaFindMemoryTypeIndex() that derives memoryTypeBits from a VkImageCreateInfo.
VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional, 0 = default.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional, 0 = no limit.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/*
Marks all allocations in the given pool as lost if they are not used in the
current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
*/
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

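/*
A minimal custom-pool sketch (illustrative, not part of the original header):
memoryTypeIndex is assumed to come from vmaFindMemoryTypeIndex().

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memoryTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolInfo.minBlockCount = 1;

    VmaPool pool;
    vmaCreatePool(allocator, &poolInfo, &pool);

    VmaPoolStats poolStats;
    vmaGetPoolStats(allocator, pool, &poolStats);
    // ... make allocations with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/
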
VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation object, retrievable using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    /// Handle to the Vulkan memory object. It can change after defragmentation or when the allocation becomes lost.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes. It never changes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null unless the allocation is persistently mapped.
    void* pMappedData;
    /// Custom pointer passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/*
Creates a new allocation that is in the lost state from the beginning.
It can be useful if you need a dummy, non-null allocation.
*/
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

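/*
A minimal mapping sketch (illustrative, not part of the original header):
assumes the allocation lives in a host-visible memory type and that
vmaMapMemory()/vmaUnmapMemory() calls stay balanced.

    void* pData;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcVertices, srcVerticesSize); // hypothetical source data
        vmaUnmapMemory(allocator, allocation);
    }
*/
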
/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places. VK_WHOLE_SIZE means no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place. UINT32_MAX means no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that were copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that were moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

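/*
A minimal defragmentation sketch (illustrative, not part of the original
header): `allocs` is assumed to be an array you own of VmaAllocation handles
that are safe to move (not in use by the GPU, not mapped).

    VkBool32 changed[kAllocCount] = {};
    VmaDefragmentationStats defragStats = {};
    vmaDefragment(allocator, allocs, kAllocCount, changed, NULL, &defragStats);
    // For every i with changed[i] == VK_TRUE, the allocation was moved:
    // recreate and rebind the buffer/image that used it and re-upload its data.
*/
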
/// Creates a new VkBuffer, allocates appropriate memory for it, and binds the buffer with the memory.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/*
Destroys the Vulkan buffer and frees allocated memory.
It is safe to pass null as buffer and/or allocation.
*/
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

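/*
A minimal buffer-creation sketch (illustrative, not part of the original
header); error handling omitted.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
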
/// Function similar to vmaCreateBuffer(), but for images.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/*
Destroys the Vulkan image and frees allocated memory.
It is safe to pass null as image and/or allocation.
*/
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

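/*
Typical inclusion pattern (illustrative, not part of the original header):
define VMA_IMPLEMENTATION in exactly one translation unit before including this
file, so the implementation below is compiled once; include the header alone
everywhere else.

    // vma_impl.cpp (hypothetical file)
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/
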
// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32) && !defined(__APPLE__)
    #include <malloc.h> // for aligned_alloc()
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

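// Illustrative checks (not from the original source):
//   VmaCountBitsSet(0x0000000Bu) == 3  (binary 1011)
//   VmaCountBitsSet(0xFFFFFFFFu) == 32
// Useful e.g. for counting how many memory types a memoryTypeBits mask allows.
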
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must have a smaller memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

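/*
Illustrative values (not from the original source), with pageSize = 4096
(pageSize must be a power of two, since page indices are computed by masking
with ~(pageSize - 1)):

    VmaBlocksOnSamePage(0, 16, 16, 4096)   == true  // A ends and B starts on page 0
    VmaBlocksOnSamePage(0, 16, 4096, 4096) == false // B starts on the next page
*/
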
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

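/*
A minimal usage sketch (illustrative, not from the original source): behaves
like std::lower_bound.

    const int sorted[] = { 1, 3, 5, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 4, 5, [](int a, int b) { return a < b; });
    // it now points to sorted[2]; inserting before it keeps the range sorted.
*/
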
////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

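/*
A minimal usage sketch (illustrative, not from the original source). Note that
Alloc() returns raw storage: the constructor of T is not called, and Free()
does not call the destructor - callers use placement new / explicit destructor
calls (see vma_new / vma_delete above) or restrict T to POD-like types.

    VmaPoolAllocator<MyItem> itemAllocator(pAllocationCallbacks, 128); // hypothetical MyItem
    MyItem* item = itemAllocator.Alloc();
    // ... use *item ...
    itemAllocator.Free(item);
*/
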
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

struct VmaAllocation_T
{
private:
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

3525  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3526  VkDeviceSize GetAlignment() const { return m_Alignment; }
3527  VkDeviceSize GetSize() const { return m_Size; }
3528  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3529  void* GetUserData() const { return m_pUserData; }
3530  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3531  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3532 
3533  VmaDeviceMemoryBlock* GetBlock() const
3534  {
3535  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3536  return m_BlockAllocation.m_Block;
3537  }
3538  VkDeviceSize GetOffset() const;
3539  VkDeviceMemory GetMemory() const;
3540  uint32_t GetMemoryTypeIndex() const;
3541  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3542  void* GetMappedData() const;
3543  bool CanBecomeLost() const;
3544  VmaPool GetPool() const;
3545 
3546  uint32_t GetLastUseFrameIndex() const
3547  {
3548  return m_LastUseFrameIndex.load();
3549  }
3550  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3551  {
3552  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3553  }
3554  /*
3555  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3556  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3557  - Else, returns false.
3558 
3559  If hAllocation is already lost, assert - you should not call it then.
3560  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3561  */
3562  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3563 
3564  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3565  {
3566  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3567  outInfo.blockCount = 1;
3568  outInfo.allocationCount = 1;
3569  outInfo.unusedRangeCount = 0;
3570  outInfo.usedBytes = m_Size;
3571  outInfo.unusedBytes = 0;
3572  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3573  outInfo.unusedRangeSizeMin = UINT64_MAX;
3574  outInfo.unusedRangeSizeMax = 0;
3575  }
3576 
3577  void BlockAllocMap();
3578  void BlockAllocUnmap();
3579  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3580  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3581 
3582 private:
3583  VkDeviceSize m_Alignment;
3584  VkDeviceSize m_Size;
3585  void* m_pUserData;
3586  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3587  uint8_t m_Type; // ALLOCATION_TYPE
3588  uint8_t m_SuballocationType; // VmaSuballocationType
3589  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3590  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3591  uint8_t m_MapCount;
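 // Illustrative sketch, not part of the library: how the two pieces of
 // m_MapCount combine, assuming MAP_COUNT_FLAG_PERSISTENT_MAP == 0x80 as
 // described above.
 //
 //   uint8_t mapCount = MAP_COUNT_FLAG_PERSISTENT_MAP | 2; // persistent + 2 manual maps
 //   bool persistent = (mapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; // true
 //   uint32_t refCount = mapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;     // 2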
3592  uint8_t m_Flags; // enum FLAGS
3593 
3594  // Allocation out of VmaDeviceMemoryBlock.
3595  struct BlockAllocation
3596  {
3597  VmaPool m_hPool; // Null if belongs to general memory.
3598  VmaDeviceMemoryBlock* m_Block;
3599  VkDeviceSize m_Offset;
3600  bool m_CanBecomeLost;
3601  };
3602 
3603  // Allocation for an object that has its own private VkDeviceMemory.
3604  struct DedicatedAllocation
3605  {
3606  uint32_t m_MemoryTypeIndex;
3607  VkDeviceMemory m_hMemory;
3608  void* m_pMappedData; // Not null means memory is mapped.
3609  };
3610 
3611  union
3612  {
3613  // Allocation out of VmaDeviceMemoryBlock.
3614  BlockAllocation m_BlockAllocation;
3615  // Allocation for an object that has its own private VkDeviceMemory.
3616  DedicatedAllocation m_DedicatedAllocation;
3617  };
3618 
3619  void FreeUserDataString(VmaAllocator hAllocator);
3620 };
3621 
3622 /*
3623 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
3624 allocation and returned as allocated memory, or free.
3625 */
3626 struct VmaSuballocation
3627 {
3628  VkDeviceSize offset;
3629  VkDeviceSize size;
3630  VmaAllocation hAllocation;
3631  VmaSuballocationType type;
3632 };
3633 
3634 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3635 
3636 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3637 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3638 
3639 /*
3640 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3641 
3642 If canMakeOtherLost was false:
3643 - item points to a FREE suballocation.
3644 - itemsToMakeLostCount is 0.
3645 
3646 If canMakeOtherLost was true:
3647 - item points to first of sequence of suballocations, which are either FREE,
3648  or point to VmaAllocations that can become lost.
3649 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3650  the requested allocation to succeed.
3651 */
3652 struct VmaAllocationRequest
3653 {
3654  VkDeviceSize offset;
3655  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3656  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3657  VmaSuballocationList::iterator item;
3658  size_t itemsToMakeLostCount;
3659 
3660  VkDeviceSize CalcCost() const
3661  {
3662  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3663  }
3664 };
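// A quick worked example of CalcCost(), for illustration only: with
// VMA_LOST_ALLOCATION_COST == 1048576, a candidate request whose overlapping
// lost allocations sum to sumItemSize == 262144 bytes and which must make
// itemsToMakeLostCount == 2 allocations lost costs
// 262144 + 2 * 1048576 == 2359296. CreateAllocationRequest() below keeps the
// candidate with the lowest such cost.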
3665 
3666 /*
3667 Data structure used for bookkeeping of allocations and unused ranges of memory
3668 in a single VkDeviceMemory block.
3669 */
3670 class VmaBlockMetadata
3671 {
3672 public:
3673  VmaBlockMetadata(VmaAllocator hAllocator);
3674  ~VmaBlockMetadata();
3675  void Init(VkDeviceSize size);
3676 
3677  // Validates all data structures inside this object. If not valid, returns false.
3678  bool Validate() const;
3679  VkDeviceSize GetSize() const { return m_Size; }
3680  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3681  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3682  VkDeviceSize GetUnusedRangeSizeMax() const;
3683  // Returns true if this block is empty - contains only a single free suballocation.
3684  bool IsEmpty() const;
3685 
3686  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3687  void AddPoolStats(VmaPoolStats& inoutStats) const;
3688 
3689 #if VMA_STATS_STRING_ENABLED
3690  void PrintDetailedMap(class VmaJsonWriter& json) const;
3691 #endif
3692 
3693  // Creates a trivial request for the case when the block is empty.
3694  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3695 
3696  // Tries to find a place for suballocation with given parameters inside this block.
3697  // If succeeded, fills pAllocationRequest and returns true.
3698  // If failed, returns false.
3699  bool CreateAllocationRequest(
3700  uint32_t currentFrameIndex,
3701  uint32_t frameInUseCount,
3702  VkDeviceSize bufferImageGranularity,
3703  VkDeviceSize allocSize,
3704  VkDeviceSize allocAlignment,
3705  VmaSuballocationType allocType,
3706  bool canMakeOtherLost,
3707  VmaAllocationRequest* pAllocationRequest);
3708 
3709  bool MakeRequestedAllocationsLost(
3710  uint32_t currentFrameIndex,
3711  uint32_t frameInUseCount,
3712  VmaAllocationRequest* pAllocationRequest);
3713 
3714  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3715 
3716  // Makes actual allocation based on request. Request must already be checked and valid.
3717  void Alloc(
3718  const VmaAllocationRequest& request,
3719  VmaSuballocationType type,
3720  VkDeviceSize allocSize,
3721  VmaAllocation hAllocation);
3722 
3723  // Frees the suballocation assigned to the given allocation.
3724  void Free(const VmaAllocation allocation);
3725  void FreeAtOffset(VkDeviceSize offset);
3726 
3727 private:
3728  VkDeviceSize m_Size;
3729  uint32_t m_FreeCount;
3730  VkDeviceSize m_SumFreeSize;
3731  VmaSuballocationList m_Suballocations;
3732  // Suballocations that are free and have size greater than a certain threshold.
3733  // Sorted by size, ascending.
3734  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3735 
3736  bool ValidateFreeSuballocationList() const;
3737 
3738  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
3739  // If yes, fills pOffset and returns true. If no, returns false.
3740  bool CheckAllocation(
3741  uint32_t currentFrameIndex,
3742  uint32_t frameInUseCount,
3743  VkDeviceSize bufferImageGranularity,
3744  VkDeviceSize allocSize,
3745  VkDeviceSize allocAlignment,
3746  VmaSuballocationType allocType,
3747  VmaSuballocationList::const_iterator suballocItem,
3748  bool canMakeOtherLost,
3749  VkDeviceSize* pOffset,
3750  size_t* itemsToMakeLostCount,
3751  VkDeviceSize* pSumFreeSize,
3752  VkDeviceSize* pSumItemSize) const;
3753  // Given a free suballocation, merges it with the following one, which must also be free.
3754  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3755  // Releases given suballocation, making it free.
3756  // Merges it with adjacent free suballocations if applicable.
3757  // Returns iterator to new free suballocation at this place.
3758  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3759  // Given a free suballocation, inserts it into the sorted list
3760  // m_FreeSuballocationsBySize if it is suitable (large enough to register).
3761  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3762  // Given a free suballocation, removes it from the sorted list
3763  // m_FreeSuballocationsBySize if it is suitable (large enough to have been registered).
3764  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3765 };
3766 
3767 // Helper class that represents mapped memory. Synchronized internally.
3768 class VmaDeviceMemoryMapping
3769 {
3770 public:
3771  VmaDeviceMemoryMapping();
3772  ~VmaDeviceMemoryMapping();
3773 
3774  void* GetMappedData() const { return m_pMappedData; }
3775 
3776  // ppData can be null.
3777  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3778  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3779 
3780 private:
3781  VMA_MUTEX m_Mutex;
3782  uint32_t m_MapCount;
3783  void* m_pMappedData;
3784 };
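// Minimal usage sketch, for illustration only; hAllocator and hMemory are
// placeholders for an existing allocator and an already-allocated
// VkDeviceMemory handle.
//
//   VmaDeviceMemoryMapping mapping;
//   void* pData = VMA_NULL;
//   if(mapping.Map(hAllocator, hMemory, 1, &pData) == VK_SUCCESS)
//   {
//       // ... read/write through pData ...
//       mapping.Unmap(hAllocator, hMemory, 1); // decrements the internal reference count
//   }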
3785 
3786 /*
3787 Represents a single block of device memory (`VkDeviceMemory`) with all the
3788 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3789 
3790 Thread-safety: This class must be externally synchronized.
3791 */
3792 class VmaDeviceMemoryBlock
3793 {
3794 public:
3795  uint32_t m_MemoryTypeIndex;
3796  VkDeviceMemory m_hMemory;
3797  VmaDeviceMemoryMapping m_Mapping;
3798  VmaBlockMetadata m_Metadata;
3799 
3800  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3801 
3802  ~VmaDeviceMemoryBlock()
3803  {
3804  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3805  }
3806 
3807  // Always call after construction.
3808  void Init(
3809  uint32_t newMemoryTypeIndex,
3810  VkDeviceMemory newMemory,
3811  VkDeviceSize newSize);
3812  // Always call before destruction.
3813  void Destroy(VmaAllocator allocator);
3814 
3815  // Validates all data structures inside this object. If not valid, returns false.
3816  bool Validate() const;
3817 
3818  // ppData can be null.
3819  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3820  void Unmap(VmaAllocator hAllocator, uint32_t count);
3821 };
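// Lifecycle sketch mirroring the assertions above, for illustration only;
// memTypeIndex, hMemory and size are placeholders.
//
//   VmaDeviceMemoryBlock block(hAllocator);
//   block.Init(memTypeIndex, hMemory, size); // takes ownership of hMemory
//   // ... suballocate via block.m_Metadata, map via block.Map()/Unmap() ...
//   block.Destroy(hAllocator); // must precede destruction, or the destructor asserts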
3822 
3823 struct VmaPointerLess
3824 {
3825  bool operator()(const void* lhs, const void* rhs) const
3826  {
3827  return lhs < rhs;
3828  }
3829 };
3830 
3831 class VmaDefragmentator;
3832 
3833 /*
3834 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3835 Vulkan memory type.
3836 
3837 Synchronized internally with a mutex.
3838 */
3839 struct VmaBlockVector
3840 {
3841  VmaBlockVector(
3842  VmaAllocator hAllocator,
3843  uint32_t memoryTypeIndex,
3844  VkDeviceSize preferredBlockSize,
3845  size_t minBlockCount,
3846  size_t maxBlockCount,
3847  VkDeviceSize bufferImageGranularity,
3848  uint32_t frameInUseCount,
3849  bool isCustomPool);
3850  ~VmaBlockVector();
3851 
3852  VkResult CreateMinBlocks();
3853 
3854  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3855  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3856  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3857  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3858 
3859  void GetPoolStats(VmaPoolStats* pStats);
3860 
3861  bool IsEmpty() const { return m_Blocks.empty(); }
3862 
3863  VkResult Allocate(
3864  VmaPool hCurrentPool,
3865  uint32_t currentFrameIndex,
3866  const VkMemoryRequirements& vkMemReq,
3867  const VmaAllocationCreateInfo& createInfo,
3868  VmaSuballocationType suballocType,
3869  VmaAllocation* pAllocation);
3870 
3871  void Free(
3872  VmaAllocation hAllocation);
3873 
3874  // Adds statistics of this BlockVector to pStats.
3875  void AddStats(VmaStats* pStats);
3876 
3877 #if VMA_STATS_STRING_ENABLED
3878  void PrintDetailedMap(class VmaJsonWriter& json);
3879 #endif
3880 
3881  void MakePoolAllocationsLost(
3882  uint32_t currentFrameIndex,
3883  size_t* pLostAllocationCount);
3884 
3885  VmaDefragmentator* EnsureDefragmentator(
3886  VmaAllocator hAllocator,
3887  uint32_t currentFrameIndex);
3888 
3889  VkResult Defragment(
3890  VmaDefragmentationStats* pDefragmentationStats,
3891  VkDeviceSize& maxBytesToMove,
3892  uint32_t& maxAllocationsToMove);
3893 
3894  void DestroyDefragmentator();
3895 
3896 private:
3897  friend class VmaDefragmentator;
3898 
3899  const VmaAllocator m_hAllocator;
3900  const uint32_t m_MemoryTypeIndex;
3901  const VkDeviceSize m_PreferredBlockSize;
3902  const size_t m_MinBlockCount;
3903  const size_t m_MaxBlockCount;
3904  const VkDeviceSize m_BufferImageGranularity;
3905  const uint32_t m_FrameInUseCount;
3906  const bool m_IsCustomPool;
3907  VMA_MUTEX m_Mutex;
3908  // Incrementally sorted by sumFreeSize, ascending.
3909  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3910  /* There can be at most one block that is completely empty - a
3911  hysteresis to avoid the pessimistic case of alternating creation and
3912  destruction of a VkDeviceMemory. */
3913  bool m_HasEmptyBlock;
3914  VmaDefragmentator* m_pDefragmentator;
3915 
3916  size_t CalcMaxBlockSize() const;
3917 
3918  // Finds and removes given block from vector.
3919  void Remove(VmaDeviceMemoryBlock* pBlock);
3920 
3921  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3922  // after this call.
3923  void IncrementallySortBlocks();
3924 
3925  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3926 };
3927 
3928 struct VmaPool_T
3929 {
3930 public:
3931  VmaBlockVector m_BlockVector;
3932 
3933  // Takes ownership.
3934  VmaPool_T(
3935  VmaAllocator hAllocator,
3936  const VmaPoolCreateInfo& createInfo);
3937  ~VmaPool_T();
3938 
3939  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3940 
3941 #if VMA_STATS_STRING_ENABLED
3942  //void PrintDetailedMap(class VmaStringBuilder& sb);
3943 #endif
3944 };
3945 
3946 class VmaDefragmentator
3947 {
3948  const VmaAllocator m_hAllocator;
3949  VmaBlockVector* const m_pBlockVector;
3950  uint32_t m_CurrentFrameIndex;
3951  VkDeviceSize m_BytesMoved;
3952  uint32_t m_AllocationsMoved;
3953 
3954  struct AllocationInfo
3955  {
3956  VmaAllocation m_hAllocation;
3957  VkBool32* m_pChanged;
3958 
3959  AllocationInfo() :
3960  m_hAllocation(VK_NULL_HANDLE),
3961  m_pChanged(VMA_NULL)
3962  {
3963  }
3964  };
3965 
3966  struct AllocationInfoSizeGreater
3967  {
3968  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3969  {
3970  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3971  }
3972  };
3973 
3974  // Used between AddAllocation and Defragment.
3975  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3976 
3977  struct BlockInfo
3978  {
3979  VmaDeviceMemoryBlock* m_pBlock;
3980  bool m_HasNonMovableAllocations;
3981  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3982 
3983  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3984  m_pBlock(VMA_NULL),
3985  m_HasNonMovableAllocations(true),
3986  m_Allocations(pAllocationCallbacks),
3987  m_pMappedDataForDefragmentation(VMA_NULL)
3988  {
3989  }
3990 
3991  void CalcHasNonMovableAllocations()
3992  {
3993  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3994  const size_t defragmentAllocCount = m_Allocations.size();
3995  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3996  }
3997 
3998  void SortAllocationsBySizeDescecnding()
3999  {
4000  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4001  }
4002 
4003  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4004  void Unmap(VmaAllocator hAllocator);
4005 
4006  private:
4007  // Not null if mapped for defragmentation only, not originally mapped.
4008  void* m_pMappedDataForDefragmentation;
4009  };
4010 
4011  struct BlockPointerLess
4012  {
4013  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4014  {
4015  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4016  }
4017  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4018  {
4019  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4020  }
4021  };
4022 
4023  // 1. Blocks with some non-movable allocations go first.
4024  // 2. Blocks with smaller sumFreeSize go first.
4025  struct BlockInfoCompareMoveDestination
4026  {
4027  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4028  {
4029  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4030  {
4031  return true;
4032  }
4033  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4034  {
4035  return false;
4036  }
4037  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4038  {
4039  return true;
4040  }
4041  return false;
4042  }
4043  };
4044 
4045  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4046  BlockInfoVector m_Blocks;
4047 
4048  VkResult DefragmentRound(
4049  VkDeviceSize maxBytesToMove,
4050  uint32_t maxAllocationsToMove);
4051 
4052  static bool MoveMakesSense(
4053  size_t dstBlockIndex, VkDeviceSize dstOffset,
4054  size_t srcBlockIndex, VkDeviceSize srcOffset);
4055 
4056 public:
4057  VmaDefragmentator(
4058  VmaAllocator hAllocator,
4059  VmaBlockVector* pBlockVector,
4060  uint32_t currentFrameIndex);
4061 
4062  ~VmaDefragmentator();
4063 
4064  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4065  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4066 
4067  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4068 
4069  VkResult Defragment(
4070  VkDeviceSize maxBytesToMove,
4071  uint32_t maxAllocationsToMove);
4072 };
4073 
4074 // Main allocator object.
4075 struct VmaAllocator_T
4076 {
4077  bool m_UseMutex;
4078  bool m_UseKhrDedicatedAllocation;
4079  VkDevice m_hDevice;
4080  bool m_AllocationCallbacksSpecified;
4081  VkAllocationCallbacks m_AllocationCallbacks;
4082  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4083 
4084  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4085  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4086  VMA_MUTEX m_HeapSizeLimitMutex;
4087 
4088  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4089  VkPhysicalDeviceMemoryProperties m_MemProps;
4090 
4091  // Default pools.
4092  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4093 
4094  // Each vector is sorted by memory (handle value).
4095  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4096  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4097  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4098 
4099  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4100  ~VmaAllocator_T();
4101 
4102  const VkAllocationCallbacks* GetAllocationCallbacks() const
4103  {
4104  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4105  }
4106  const VmaVulkanFunctions& GetVulkanFunctions() const
4107  {
4108  return m_VulkanFunctions;
4109  }
4110 
4111  VkDeviceSize GetBufferImageGranularity() const
4112  {
4113  return VMA_MAX(
4114  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4115  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4116  }
4117 
4118  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4119  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4120 
4121  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4122  {
4123  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4124  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4125  }
4126 
4127  void GetBufferMemoryRequirements(
4128  VkBuffer hBuffer,
4129  VkMemoryRequirements& memReq,
4130  bool& requiresDedicatedAllocation,
4131  bool& prefersDedicatedAllocation) const;
4132  void GetImageMemoryRequirements(
4133  VkImage hImage,
4134  VkMemoryRequirements& memReq,
4135  bool& requiresDedicatedAllocation,
4136  bool& prefersDedicatedAllocation) const;
4137 
4138  // Main allocation function.
4139  VkResult AllocateMemory(
4140  const VkMemoryRequirements& vkMemReq,
4141  bool requiresDedicatedAllocation,
4142  bool prefersDedicatedAllocation,
4143  VkBuffer dedicatedBuffer,
4144  VkImage dedicatedImage,
4145  const VmaAllocationCreateInfo& createInfo,
4146  VmaSuballocationType suballocType,
4147  VmaAllocation* pAllocation);
4148 
4149  // Main deallocation function.
4150  void FreeMemory(const VmaAllocation allocation);
4151 
4152  void CalculateStats(VmaStats* pStats);
4153 
4154 #if VMA_STATS_STRING_ENABLED
4155  void PrintDetailedMap(class VmaJsonWriter& json);
4156 #endif
4157 
4158  VkResult Defragment(
4159  VmaAllocation* pAllocations,
4160  size_t allocationCount,
4161  VkBool32* pAllocationsChanged,
4162  const VmaDefragmentationInfo* pDefragmentationInfo,
4163  VmaDefragmentationStats* pDefragmentationStats);
4164 
4165  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4166  bool TouchAllocation(VmaAllocation hAllocation);
4167 
4168  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4169  void DestroyPool(VmaPool pool);
4170  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4171 
4172  void SetCurrentFrameIndex(uint32_t frameIndex);
4173 
4174  void MakePoolAllocationsLost(
4175  VmaPool hPool,
4176  size_t* pLostAllocationCount);
4177 
4178  void CreateLostAllocation(VmaAllocation* pAllocation);
4179 
4180  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4181  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4182 
4183  VkResult Map(VmaAllocation hAllocation, void** ppData);
4184  void Unmap(VmaAllocation hAllocation);
4185 
4186 private:
4187  VkDeviceSize m_PreferredLargeHeapBlockSize;
4188 
4189  VkPhysicalDevice m_PhysicalDevice;
4190  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4191 
4192  VMA_MUTEX m_PoolsMutex;
4193  // Protected by m_PoolsMutex. Sorted by pointer value.
4194  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4195 
4196  VmaVulkanFunctions m_VulkanFunctions;
4197 
4198  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4199 
4200  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4201 
4202  VkResult AllocateMemoryOfType(
4203  const VkMemoryRequirements& vkMemReq,
4204  bool dedicatedAllocation,
4205  VkBuffer dedicatedBuffer,
4206  VkImage dedicatedImage,
4207  const VmaAllocationCreateInfo& createInfo,
4208  uint32_t memTypeIndex,
4209  VmaSuballocationType suballocType,
4210  VmaAllocation* pAllocation);
4211 
4212  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
4213  VkResult AllocateDedicatedMemory(
4214  VkDeviceSize size,
4215  VmaSuballocationType suballocType,
4216  uint32_t memTypeIndex,
4217  bool map,
4218  bool isUserDataString,
4219  void* pUserData,
4220  VkBuffer dedicatedBuffer,
4221  VkImage dedicatedImage,
4222  VmaAllocation* pAllocation);
4223 
4224  // Frees a dedicated allocation: unregisters it and releases its VkDeviceMemory.
4225  void FreeDedicatedMemory(VmaAllocation allocation);
4226 };
4227 
4228 //////////////////////////////////////////////////////////////////////////////
4229 // Memory allocation #2 after VmaAllocator_T definition
4230 
4231 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4232 {
4233  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4234 }
4235 
4236 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4237 {
4238  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4239 }
4240 
4241 template<typename T>
4242 static T* VmaAllocate(VmaAllocator hAllocator)
4243 {
4244  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4245 }
4246 
4247 template<typename T>
4248 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4249 {
4250  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4251 }
4252 
4253 template<typename T>
4254 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4255 {
4256  if(ptr != VMA_NULL)
4257  {
4258  ptr->~T();
4259  VmaFree(hAllocator, ptr);
4260  }
4261 }
4262 
4263 template<typename T>
4264 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4265 {
4266  if(ptr != VMA_NULL)
4267  {
4268  for(size_t i = count; i--; )
4269  ptr[i].~T();
4270  VmaFree(hAllocator, ptr);
4271  }
4272 }
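// Usage sketch, for illustration only: the helpers above are meant to be used
// in matching pairs. Note that VmaAllocateArray() only allocates raw storage,
// while vma_delete_array() runs destructors (in reverse order) before freeing,
// so elements must be constructed in between. MyStruct is a hypothetical type;
// placement new requires <new>.
//
//   MyStruct* const arr = VmaAllocateArray<MyStruct>(hAllocator, 16);
//   for(size_t i = 0; i < 16; ++i)
//       new(&arr[i]) MyStruct(); // placement-construct each element
//   // ... use arr ...
//   vma_delete_array(hAllocator, arr, 16);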
4273 
4274 //////////////////////////////////////////////////////////////////////////////
4275 // VmaStringBuilder
4276 
4277 #if VMA_STATS_STRING_ENABLED
4278 
4279 class VmaStringBuilder
4280 {
4281 public:
4282  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4283  size_t GetLength() const { return m_Data.size(); }
4284  const char* GetData() const { return m_Data.data(); }
4285 
4286  void Add(char ch) { m_Data.push_back(ch); }
4287  void Add(const char* pStr);
4288  void AddNewLine() { Add('\n'); }
4289  void AddNumber(uint32_t num);
4290  void AddNumber(uint64_t num);
4291  void AddPointer(const void* ptr);
4292 
4293 private:
4294  VmaVector< char, VmaStlAllocator<char> > m_Data;
4295 };
4296 
4297 void VmaStringBuilder::Add(const char* pStr)
4298 {
4299  const size_t strLen = strlen(pStr);
4300  if(strLen > 0)
4301  {
4302  const size_t oldCount = m_Data.size();
4303  m_Data.resize(oldCount + strLen);
4304  memcpy(m_Data.data() + oldCount, pStr, strLen);
4305  }
4306 }
4307 
4308 void VmaStringBuilder::AddNumber(uint32_t num)
4309 {
4310  char buf[11];
4311  VmaUint32ToStr(buf, sizeof(buf), num);
4312  Add(buf);
4313 }
4314 
4315 void VmaStringBuilder::AddNumber(uint64_t num)
4316 {
4317  char buf[21];
4318  VmaUint64ToStr(buf, sizeof(buf), num);
4319  Add(buf);
4320 }
4321 
4322 void VmaStringBuilder::AddPointer(const void* ptr)
4323 {
4324  char buf[21];
4325  VmaPtrToStr(buf, sizeof(buf), ptr);
4326  Add(buf);
4327 }
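// Usage sketch, for illustration only:
//
//   VmaStringBuilder sb(hAllocator);
//   sb.Add("Allocations: ");
//   sb.AddNumber(42u); // picks the uint32_t overload on typical platforms
//   sb.AddNewLine();
//
// Note that GetData() returns the raw character buffer without a terminating
// '\0'; always pair it with GetLength().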
4328 
4329 #endif // #if VMA_STATS_STRING_ENABLED
4330 
4331 //////////////////////////////////////////////////////////////////////////////
4332 // VmaJsonWriter
4333 
4334 #if VMA_STATS_STRING_ENABLED
4335 
4336 class VmaJsonWriter
4337 {
4338 public:
4339  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4340  ~VmaJsonWriter();
4341 
4342  void BeginObject(bool singleLine = false);
4343  void EndObject();
4344 
4345  void BeginArray(bool singleLine = false);
4346  void EndArray();
4347 
4348  void WriteString(const char* pStr);
4349  void BeginString(const char* pStr = VMA_NULL);
4350  void ContinueString(const char* pStr);
4351  void ContinueString(uint32_t n);
4352  void ContinueString(uint64_t n);
4353  void ContinueString_Pointer(const void* ptr);
4354  void EndString(const char* pStr = VMA_NULL);
4355 
4356  void WriteNumber(uint32_t n);
4357  void WriteNumber(uint64_t n);
4358  void WriteBool(bool b);
4359  void WriteNull();
4360 
4361 private:
4362  static const char* const INDENT;
4363 
4364  enum COLLECTION_TYPE
4365  {
4366  COLLECTION_TYPE_OBJECT,
4367  COLLECTION_TYPE_ARRAY,
4368  };
4369  struct StackItem
4370  {
4371  COLLECTION_TYPE type;
4372  uint32_t valueCount;
4373  bool singleLineMode;
4374  };
4375 
4376  VmaStringBuilder& m_SB;
4377  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4378  bool m_InsideString;
4379 
4380  void BeginValue(bool isString);
4381  void WriteIndent(bool oneLess = false);
4382 };
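// Usage sketch, for illustration only: inside an object, strings at even
// positions are keys and the following values complete the pairs, which is
// exactly what the valueCount % 2 logic in BeginValue() below enforces.
//
//   VmaStringBuilder sb(hAllocator);
//   VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("Count"); // key
//   json.WriteNumber(2u);      // value
//   json.EndObject();
//   // sb now holds a small JSON object equivalent to: { "Count": 2 }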
4383 
4384 const char* const VmaJsonWriter::INDENT = " ";
4385 
4386 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4387  m_SB(sb),
4388  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4389  m_InsideString(false)
4390 {
4391 }
4392 
4393 VmaJsonWriter::~VmaJsonWriter()
4394 {
4395  VMA_ASSERT(!m_InsideString);
4396  VMA_ASSERT(m_Stack.empty());
4397 }
4398 
4399 void VmaJsonWriter::BeginObject(bool singleLine)
4400 {
4401  VMA_ASSERT(!m_InsideString);
4402 
4403  BeginValue(false);
4404  m_SB.Add('{');
4405 
4406  StackItem item;
4407  item.type = COLLECTION_TYPE_OBJECT;
4408  item.valueCount = 0;
4409  item.singleLineMode = singleLine;
4410  m_Stack.push_back(item);
4411 }
4412 
4413 void VmaJsonWriter::EndObject()
4414 {
4415  VMA_ASSERT(!m_InsideString);
4416 
4417  WriteIndent(true);
4418  m_SB.Add('}');
4419 
4420  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4421  m_Stack.pop_back();
4422 }
4423 
4424 void VmaJsonWriter::BeginArray(bool singleLine)
4425 {
4426  VMA_ASSERT(!m_InsideString);
4427 
4428  BeginValue(false);
4429  m_SB.Add('[');
4430 
4431  StackItem item;
4432  item.type = COLLECTION_TYPE_ARRAY;
4433  item.valueCount = 0;
4434  item.singleLineMode = singleLine;
4435  m_Stack.push_back(item);
4436 }
4437 
4438 void VmaJsonWriter::EndArray()
4439 {
4440  VMA_ASSERT(!m_InsideString);
4441 
4442  WriteIndent(true);
4443  m_SB.Add(']');
4444 
4445  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4446  m_Stack.pop_back();
4447 }
4448 
4449 void VmaJsonWriter::WriteString(const char* pStr)
4450 {
4451  BeginString(pStr);
4452  EndString();
4453 }
4454 
4455 void VmaJsonWriter::BeginString(const char* pStr)
4456 {
4457  VMA_ASSERT(!m_InsideString);
4458 
4459  BeginValue(true);
4460  m_SB.Add('"');
4461  m_InsideString = true;
4462  if(pStr != VMA_NULL && pStr[0] != '\0')
4463  {
4464  ContinueString(pStr);
4465  }
4466 }
4467 
4468 void VmaJsonWriter::ContinueString(const char* pStr)
4469 {
4470  VMA_ASSERT(m_InsideString);
4471 
4472  const size_t strLen = strlen(pStr);
4473  for(size_t i = 0; i < strLen; ++i)
4474  {
4475  char ch = pStr[i];
4476  if(ch == '\\')
4477  {
4478  m_SB.Add("\\\\");
4479  }
4480  else if(ch == '"')
4481  {
4482  m_SB.Add("\\\"");
4483  }
4484  else if(ch >= 32)
4485  {
4486  m_SB.Add(ch);
4487  }
4488  else switch(ch)
4489  {
4490  case '\b':
4491  m_SB.Add("\\b");
4492  break;
4493  case '\f':
4494  m_SB.Add("\\f");
4495  break;
4496  case '\n':
4497  m_SB.Add("\\n");
4498  break;
4499  case '\r':
4500  m_SB.Add("\\r");
4501  break;
4502  case '\t':
4503  m_SB.Add("\\t");
4504  break;
4505  default:
4506  VMA_ASSERT(0 && "Character not currently supported.");
4507  break;
4508  }
4509  }
4510 }
4511 
4512 void VmaJsonWriter::ContinueString(uint32_t n)
4513 {
4514  VMA_ASSERT(m_InsideString);
4515  m_SB.AddNumber(n);
4516 }
4517 
4518 void VmaJsonWriter::ContinueString(uint64_t n)
4519 {
4520  VMA_ASSERT(m_InsideString);
4521  m_SB.AddNumber(n);
4522 }
4523 
4524 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4525 {
4526  VMA_ASSERT(m_InsideString);
4527  m_SB.AddPointer(ptr);
4528 }
4529 
4530 void VmaJsonWriter::EndString(const char* pStr)
4531 {
4532  VMA_ASSERT(m_InsideString);
4533  if(pStr != VMA_NULL && pStr[0] != '\0')
4534  {
4535  ContinueString(pStr);
4536  }
4537  m_SB.Add('"');
4538  m_InsideString = false;
4539 }
4540 
4541 void VmaJsonWriter::WriteNumber(uint32_t n)
4542 {
4543  VMA_ASSERT(!m_InsideString);
4544  BeginValue(false);
4545  m_SB.AddNumber(n);
4546 }
4547 
4548 void VmaJsonWriter::WriteNumber(uint64_t n)
4549 {
4550  VMA_ASSERT(!m_InsideString);
4551  BeginValue(false);
4552  m_SB.AddNumber(n);
4553 }
4554 
4555 void VmaJsonWriter::WriteBool(bool b)
4556 {
4557  VMA_ASSERT(!m_InsideString);
4558  BeginValue(false);
4559  m_SB.Add(b ? "true" : "false");
4560 }
4561 
4562 void VmaJsonWriter::WriteNull()
4563 {
4564  VMA_ASSERT(!m_InsideString);
4565  BeginValue(false);
4566  m_SB.Add("null");
4567 }
4568 
4569 void VmaJsonWriter::BeginValue(bool isString)
4570 {
4571  if(!m_Stack.empty())
4572  {
4573  StackItem& currItem = m_Stack.back();
4574  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4575  currItem.valueCount % 2 == 0)
4576  {
4577  VMA_ASSERT(isString);
4578  }
4579 
4580  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4581  currItem.valueCount % 2 != 0)
4582  {
4583  m_SB.Add(": ");
4584  }
4585  else if(currItem.valueCount > 0)
4586  {
4587  m_SB.Add(", ");
4588  WriteIndent();
4589  }
4590  else
4591  {
4592  WriteIndent();
4593  }
4594  ++currItem.valueCount;
4595  }
4596 }
4597 
4598 void VmaJsonWriter::WriteIndent(bool oneLess)
4599 {
4600  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4601  {
4602  m_SB.AddNewLine();
4603 
4604  size_t count = m_Stack.size();
4605  if(count > 0 && oneLess)
4606  {
4607  --count;
4608  }
4609  for(size_t i = 0; i < count; ++i)
4610  {
4611  m_SB.Add(INDENT);
4612  }
4613  }
4614 }
4615 
4616 #endif // #if VMA_STATS_STRING_ENABLED
4617 
4618 //////////////////////////////////////////////////////////////////////////////
4619 
4620 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4621 {
4622  if(IsUserDataString())
4623  {
4624  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4625 
4626  FreeUserDataString(hAllocator);
4627 
4628  if(pUserData != VMA_NULL)
4629  {
4630  const char* const newStrSrc = (char*)pUserData;
4631  const size_t newStrLen = strlen(newStrSrc);
4632  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4633  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4634  m_pUserData = newStrDst;
4635  }
4636  }
4637  else
4638  {
4639  m_pUserData = pUserData;
4640  }
4641 }
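// Usage sketch, for illustration only: when the allocation was created with
// user data treated as a string, SetUserData() above makes a deep copy, so the
// caller's buffer does not need to outlive the call.
//
//   char name[] = "MyTexture"; // hypothetical, caller-owned buffer
//   hAllocation->SetUserData(hAllocator, name);
//   // 'name' may now go out of scope; the allocation owns its own copy.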
4642 
4643 void VmaAllocation_T::ChangeBlockAllocation(
4644  VmaAllocator hAllocator,
4645  VmaDeviceMemoryBlock* block,
4646  VkDeviceSize offset)
4647 {
4648  VMA_ASSERT(block != VMA_NULL);
4649  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4650 
4651  // Move mapping reference counter from old block to new block.
4652  if(block != m_BlockAllocation.m_Block)
4653  {
4654  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4655  if(IsPersistentMap())
4656  ++mapRefCount;
4657  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4658  block->Map(hAllocator, mapRefCount, VMA_NULL);
4659  }
4660 
4661  m_BlockAllocation.m_Block = block;
4662  m_BlockAllocation.m_Offset = offset;
4663 }
4664 
4665 VkDeviceSize VmaAllocation_T::GetOffset() const
4666 {
4667  switch(m_Type)
4668  {
4669  case ALLOCATION_TYPE_BLOCK:
4670  return m_BlockAllocation.m_Offset;
4671  case ALLOCATION_TYPE_DEDICATED:
4672  return 0;
4673  default:
4674  VMA_ASSERT(0);
4675  return 0;
4676  }
4677 }
4678 
4679 VkDeviceMemory VmaAllocation_T::GetMemory() const
4680 {
4681  switch(m_Type)
4682  {
4683  case ALLOCATION_TYPE_BLOCK:
4684  return m_BlockAllocation.m_Block->m_hMemory;
4685  case ALLOCATION_TYPE_DEDICATED:
4686  return m_DedicatedAllocation.m_hMemory;
4687  default:
4688  VMA_ASSERT(0);
4689  return VK_NULL_HANDLE;
4690  }
4691 }
4692 
4693 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4694 {
4695  switch(m_Type)
4696  {
4697  case ALLOCATION_TYPE_BLOCK:
4698  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4699  case ALLOCATION_TYPE_DEDICATED:
4700  return m_DedicatedAllocation.m_MemoryTypeIndex;
4701  default:
4702  VMA_ASSERT(0);
4703  return UINT32_MAX;
4704  }
4705 }
4706 
4707 void* VmaAllocation_T::GetMappedData() const
4708 {
4709  switch(m_Type)
4710  {
4711  case ALLOCATION_TYPE_BLOCK:
4712  if(m_MapCount != 0)
4713  {
4714  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4715  VMA_ASSERT(pBlockData != VMA_NULL);
4716  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4717  }
4718  else
4719  {
4720  return VMA_NULL;
4721  }
4722  break;
4723  case ALLOCATION_TYPE_DEDICATED:
4724  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4725  return m_DedicatedAllocation.m_pMappedData;
4726  default:
4727  VMA_ASSERT(0);
4728  return VMA_NULL;
4729  }
4730 }
4731 
4732 bool VmaAllocation_T::CanBecomeLost() const
4733 {
4734  switch(m_Type)
4735  {
4736  case ALLOCATION_TYPE_BLOCK:
4737  return m_BlockAllocation.m_CanBecomeLost;
4738  case ALLOCATION_TYPE_DEDICATED:
4739  return false;
4740  default:
4741  VMA_ASSERT(0);
4742  return false;
4743  }
4744 }
4745 
4746 VmaPool VmaAllocation_T::GetPool() const
4747 {
4748  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4749  return m_BlockAllocation.m_hPool;
4750 }
4751 
4752 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4753 {
4754  VMA_ASSERT(CanBecomeLost());
4755 
4756  /*
4757  Warning: This is a carefully designed algorithm.
4758  Do not modify unless you really know what you're doing :)
4759  */
4760  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4761  for(;;)
4762  {
4763  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4764  {
4765  VMA_ASSERT(0);
4766  return false;
4767  }
4768  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4769  {
4770  return false;
4771  }
4772  else // Last use time earlier than current time.
4773  {
4774  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4775  {
4776  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4777  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4778  return true;
4779  }
4780  }
4781  }
4782 }
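// Worked example of the conditions above, for illustration only: with
// frameInUseCount == 2 and currentFrameIndex == 10, an allocation last used in
// frame 7 satisfies 7 + 2 < 10 and becomes lost, while one last used in frame
// 8 satisfies 8 + 2 >= 10 and stays valid. The compare-exchange loop simply
// retries if another thread touches the allocation (updates the atomic)
// between the load and the exchange.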
4783 
4784 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4785 {
4786  VMA_ASSERT(IsUserDataString());
4787  if(m_pUserData != VMA_NULL)
4788  {
4789  char* const oldStr = (char*)m_pUserData;
4790  const size_t oldStrLen = strlen(oldStr);
4791  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4792  m_pUserData = VMA_NULL;
4793  }
4794 }
4795 
4796 void VmaAllocation_T::BlockAllocMap()
4797 {
4798  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4799 
4800  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4801  {
4802  ++m_MapCount;
4803  }
4804  else
4805  {
4806  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4807  }
4808 }
4809 
4810 void VmaAllocation_T::BlockAllocUnmap()
4811 {
4812  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4813 
4814  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4815  {
4816  --m_MapCount;
4817  }
4818  else
4819  {
4820  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4821  }
4822 }
4823 
4824 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4825 {
4826  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4827 
4828  if(m_MapCount != 0)
4829  {
4830  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4831  {
4832  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4833  *ppData = m_DedicatedAllocation.m_pMappedData;
4834  ++m_MapCount;
4835  return VK_SUCCESS;
4836  }
4837  else
4838  {
4839  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4840  return VK_ERROR_MEMORY_MAP_FAILED;
4841  }
4842  }
4843  else
4844  {
4845  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4846  hAllocator->m_hDevice,
4847  m_DedicatedAllocation.m_hMemory,
4848  0, // offset
4849  VK_WHOLE_SIZE,
4850  0, // flags
4851  ppData);
4852  if(result == VK_SUCCESS)
4853  {
4854  m_DedicatedAllocation.m_pMappedData = *ppData;
4855  m_MapCount = 1;
4856  }
4857  return result;
4858  }
4859 }
4860 
4861 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4862 {
4863  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4864 
4865  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4866  {
4867  --m_MapCount;
4868  if(m_MapCount == 0)
4869  {
4870  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4871  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4872  hAllocator->m_hDevice,
4873  m_DedicatedAllocation.m_hMemory);
4874  }
4875  }
4876  else
4877  {
4878  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4879  }
4880 }
4881 
4882 #if VMA_STATS_STRING_ENABLED
4883 
4884 // Names correspond to the values of enum VmaSuballocationType.
4885 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4886  "FREE",
4887  "UNKNOWN",
4888  "BUFFER",
4889  "IMAGE_UNKNOWN",
4890  "IMAGE_LINEAR",
4891  "IMAGE_OPTIMAL",
4892 };
4893 
4894 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4895 {
4896  json.BeginObject();
4897 
4898  json.WriteString("Blocks");
4899  json.WriteNumber(stat.blockCount);
4900 
4901  json.WriteString("Allocations");
4902  json.WriteNumber(stat.allocationCount);
4903 
4904  json.WriteString("UnusedRanges");
4905  json.WriteNumber(stat.unusedRangeCount);
4906 
4907  json.WriteString("UsedBytes");
4908  json.WriteNumber(stat.usedBytes);
4909 
4910  json.WriteString("UnusedBytes");
4911  json.WriteNumber(stat.unusedBytes);
4912 
4913  if(stat.allocationCount > 1)
4914  {
4915  json.WriteString("AllocationSize");
4916  json.BeginObject(true);
4917  json.WriteString("Min");
4918  json.WriteNumber(stat.allocationSizeMin);
4919  json.WriteString("Avg");
4920  json.WriteNumber(stat.allocationSizeAvg);
4921  json.WriteString("Max");
4922  json.WriteNumber(stat.allocationSizeMax);
4923  json.EndObject();
4924  }
4925 
4926  if(stat.unusedRangeCount > 1)
4927  {
4928  json.WriteString("UnusedRangeSize");
4929  json.BeginObject(true);
4930  json.WriteString("Min");
4931  json.WriteNumber(stat.unusedRangeSizeMin);
4932  json.WriteString("Avg");
4933  json.WriteNumber(stat.unusedRangeSizeAvg);
4934  json.WriteString("Max");
4935  json.WriteNumber(stat.unusedRangeSizeMax);
4936  json.EndObject();
4937  }
4938 
4939  json.EndObject();
4940 }
4941 
4942 #endif // #if VMA_STATS_STRING_ENABLED
4943 
4944 struct VmaSuballocationItemSizeLess
4945 {
4946  bool operator()(
4947  const VmaSuballocationList::iterator lhs,
4948  const VmaSuballocationList::iterator rhs) const
4949  {
4950  return lhs->size < rhs->size;
4951  }
4952  bool operator()(
4953  const VmaSuballocationList::iterator lhs,
4954  VkDeviceSize rhsSize) const
4955  {
4956  return lhs->size < rhsSize;
4957  }
4958 };
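// The second operator() above makes the comparator heterogeneous: a stored
// iterator can be compared directly against a VkDeviceSize key. Sketch of the
// lookup this enables, mirroring CreateAllocationRequest() below (illustration
// only):
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       allocSize, // plain VkDeviceSize key
//       VmaSuballocationItemSizeLess());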
4959 
4960 //////////////////////////////////////////////////////////////////////////////
4961 // class VmaBlockMetadata
4962 
4963 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4964  m_Size(0),
4965  m_FreeCount(0),
4966  m_SumFreeSize(0),
4967  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4968  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4969 {
4970 }
4971 
4972 VmaBlockMetadata::~VmaBlockMetadata()
4973 {
4974 }
4975 
4976 void VmaBlockMetadata::Init(VkDeviceSize size)
4977 {
4978  m_Size = size;
4979  m_FreeCount = 1;
4980  m_SumFreeSize = size;
4981 
4982  VmaSuballocation suballoc = {};
4983  suballoc.offset = 0;
4984  suballoc.size = size;
4985  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4986  suballoc.hAllocation = VK_NULL_HANDLE;
4987 
4988  m_Suballocations.push_back(suballoc);
4989  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4990  --suballocItem;
4991  m_FreeSuballocationsBySize.push_back(suballocItem);
4992 }
4993 
4994 bool VmaBlockMetadata::Validate() const
4995 {
4996  if(m_Suballocations.empty())
4997  {
4998  return false;
4999  }
5000 
5001  // Expected offset of a new suballocation, as calculated from the previous ones.
5002  VkDeviceSize calculatedOffset = 0;
5003  // Expected number of free suballocations as calculated from traversing their list.
5004  uint32_t calculatedFreeCount = 0;
5005  // Expected sum size of free suballocations as calculated from traversing their list.
5006  VkDeviceSize calculatedSumFreeSize = 0;
5007  // Expected number of free suballocations that should be registered in
5008  // m_FreeSuballocationsBySize, as calculated from traversing the list.
5009  size_t freeSuballocationsToRegister = 0;
5010  // True if the previously visited suballocation was free.
5011  bool prevFree = false;
5012 
5013  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5014  suballocItem != m_Suballocations.cend();
5015  ++suballocItem)
5016  {
5017  const VmaSuballocation& subAlloc = *suballocItem;
5018 
5019  // Actual offset of this suballocation doesn't match the expected one.
5020  if(subAlloc.offset != calculatedOffset)
5021  {
5022  return false;
5023  }
5024 
5025  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5026  // Two adjacent free suballocations are invalid. They should be merged.
5027  if(prevFree && currFree)
5028  {
5029  return false;
5030  }
5031 
5032  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5033  {
5034  return false;
5035  }
5036 
5037  if(currFree)
5038  {
5039  calculatedSumFreeSize += subAlloc.size;
5040  ++calculatedFreeCount;
5041  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5042  {
5043  ++freeSuballocationsToRegister;
5044  }
5045  }
5046  else
5047  {
5048  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5049  {
5050  return false;
5051  }
5052  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5053  {
5054  return false;
5055  }
5056  }
5057 
5058  calculatedOffset += subAlloc.size;
5059  prevFree = currFree;
5060  }
5061 
5062  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5063  // match the expected one.
5064  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5065  {
5066  return false;
5067  }
5068 
5069  VkDeviceSize lastSize = 0;
5070  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5071  {
5072  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5073 
5074  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5075  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5076  {
5077  return false;
5078  }
5079  // They must be sorted by size ascending.
5080  if(suballocItem->size < lastSize)
5081  {
5082  return false;
5083  }
5084 
5085  lastSize = suballocItem->size;
5086  }
5087 
5088  // Check if totals match the calculated values.
5089  if(!ValidateFreeSuballocationList() ||
5090  (calculatedOffset != m_Size) ||
5091  (calculatedSumFreeSize != m_SumFreeSize) ||
5092  (calculatedFreeCount != m_FreeCount))
5093  {
5094  return false;
5095  }
5096 
5097  return true;
5098 }
5099 
5100 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5101 {
5102  if(!m_FreeSuballocationsBySize.empty())
5103  {
5104  return m_FreeSuballocationsBySize.back()->size;
5105  }
5106  else
5107  {
5108  return 0;
5109  }
5110 }
5111 
5112 bool VmaBlockMetadata::IsEmpty() const
5113 {
5114  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5115 }
5116 
5117 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5118 {
5119  outInfo.blockCount = 1;
5120 
5121  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5122  outInfo.allocationCount = rangeCount - m_FreeCount;
5123  outInfo.unusedRangeCount = m_FreeCount;
5124 
5125  outInfo.unusedBytes = m_SumFreeSize;
5126  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5127 
5128  outInfo.allocationSizeMin = UINT64_MAX;
5129  outInfo.allocationSizeMax = 0;
5130  outInfo.unusedRangeSizeMin = UINT64_MAX;
5131  outInfo.unusedRangeSizeMax = 0;
5132 
5133  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5134  suballocItem != m_Suballocations.cend();
5135  ++suballocItem)
5136  {
5137  const VmaSuballocation& suballoc = *suballocItem;
5138  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5139  {
5140  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5141  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5142  }
5143  else
5144  {
5145  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5146  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5147  }
5148  }
5149 }
5150 
5151 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5152 {
5153  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5154 
5155  inoutStats.size += m_Size;
5156  inoutStats.unusedSize += m_SumFreeSize;
5157  inoutStats.allocationCount += rangeCount - m_FreeCount;
5158  inoutStats.unusedRangeCount += m_FreeCount;
5159  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5160 }
5161 
5162 #if VMA_STATS_STRING_ENABLED
5163 
5164 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5165 {
5166  json.BeginObject();
5167 
5168  json.WriteString("TotalBytes");
5169  json.WriteNumber(m_Size);
5170 
5171  json.WriteString("UnusedBytes");
5172  json.WriteNumber(m_SumFreeSize);
5173 
5174  json.WriteString("Allocations");
5175  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5176 
5177  json.WriteString("UnusedRanges");
5178  json.WriteNumber(m_FreeCount);
5179 
5180  json.WriteString("Suballocations");
5181  json.BeginArray();
5182  size_t i = 0;
5183  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5184  suballocItem != m_Suballocations.cend();
5185  ++suballocItem, ++i)
5186  {
5187  json.BeginObject(true);
5188 
5189  json.WriteString("Type");
5190  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5191 
5192  json.WriteString("Size");
5193  json.WriteNumber(suballocItem->size);
5194 
5195  json.WriteString("Offset");
5196  json.WriteNumber(suballocItem->offset);
5197 
5198  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5199  {
5200  const void* pUserData = suballocItem->hAllocation->GetUserData();
5201  if(pUserData != VMA_NULL)
5202  {
5203  json.WriteString("UserData");
5204  if(suballocItem->hAllocation->IsUserDataString())
5205  {
5206  json.WriteString((const char*)pUserData);
5207  }
5208  else
5209  {
5210  json.BeginString();
5211  json.ContinueString_Pointer(pUserData);
5212  json.EndString();
5213  }
5214  }
5215  }
5216 
5217  json.EndObject();
5218  }
5219  json.EndArray();
5220 
5221  json.EndObject();
5222 }
5223 
5224 #endif // #if VMA_STATS_STRING_ENABLED
5225 
5226 /*
5227 How many suitable free suballocations to analyze before choosing the best one.
5228 - Set to 1 to use the First-Fit algorithm - the first suitable free
5229  suballocation will be chosen.
5230 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable
5231  free suballocations will be analyzed and the best one will be chosen.
5232 - Any other value is also acceptable.
5233 */
5234 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
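// Sketch of the two strategies over m_FreeSuballocationsBySize (sorted by size
// ascending), for illustration only: with free sizes { 64, 256, 1024 } and
// allocSize == 200, the best-fit path binary-searches to the first size not
// less than 200 and tries 256 first, then 1024; the other path iterates from
// the largest candidate (1024) downward.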
5235 
5236 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5237 {
5238  VMA_ASSERT(IsEmpty());
5239  pAllocationRequest->offset = 0;
5240  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5241  pAllocationRequest->sumItemSize = 0;
5242  pAllocationRequest->item = m_Suballocations.begin();
5243  pAllocationRequest->itemsToMakeLostCount = 0;
5244 }
5245 
5246 bool VmaBlockMetadata::CreateAllocationRequest(
5247  uint32_t currentFrameIndex,
5248  uint32_t frameInUseCount,
5249  VkDeviceSize bufferImageGranularity,
5250  VkDeviceSize allocSize,
5251  VkDeviceSize allocAlignment,
5252  VmaSuballocationType allocType,
5253  bool canMakeOtherLost,
5254  VmaAllocationRequest* pAllocationRequest)
5255 {
5256  VMA_ASSERT(allocSize > 0);
5257  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5258  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5259  VMA_HEAVY_ASSERT(Validate());
5260 
5261  // There is not enough total free space in this block to fulfill the request: Early return.
5262  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5263  {
5264  return false;
5265  }
5266 
5267  // New algorithm: efficiently search m_FreeSuballocationsBySize.
5268  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5269  if(freeSuballocCount > 0)
5270  {
5271  if(VMA_BEST_FIT)
5272  {
5273  // Find first free suballocation with size not less than allocSize.
5274  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5275  m_FreeSuballocationsBySize.data(),
5276  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5277  allocSize,
5278  VmaSuballocationItemSizeLess());
5279  size_t index = it - m_FreeSuballocationsBySize.data();
5280  for(; index < freeSuballocCount; ++index)
5281  {
5282  if(CheckAllocation(
5283  currentFrameIndex,
5284  frameInUseCount,
5285  bufferImageGranularity,
5286  allocSize,
5287  allocAlignment,
5288  allocType,
5289  m_FreeSuballocationsBySize[index],
5290  false, // canMakeOtherLost
5291  &pAllocationRequest->offset,
5292  &pAllocationRequest->itemsToMakeLostCount,
5293  &pAllocationRequest->sumFreeSize,
5294  &pAllocationRequest->sumItemSize))
5295  {
5296  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5297  return true;
5298  }
5299  }
5300  }
5301  else
5302  {
5303  // Search starting from the biggest suballocations.
5304  for(size_t index = freeSuballocCount; index--; )
5305  {
5306  if(CheckAllocation(
5307  currentFrameIndex,
5308  frameInUseCount,
5309  bufferImageGranularity,
5310  allocSize,
5311  allocAlignment,
5312  allocType,
5313  m_FreeSuballocationsBySize[index],
5314  false, // canMakeOtherLost
5315  &pAllocationRequest->offset,
5316  &pAllocationRequest->itemsToMakeLostCount,
5317  &pAllocationRequest->sumFreeSize,
5318  &pAllocationRequest->sumItemSize))
5319  {
5320  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5321  return true;
5322  }
5323  }
5324  }
5325  }
5326 
5327  if(canMakeOtherLost)
5328  {
5329  // Brute-force algorithm. TODO: Come up with something better.
5330 
5331  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5332  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5333 
5334  VmaAllocationRequest tmpAllocRequest = {};
5335  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5336  suballocIt != m_Suballocations.end();
5337  ++suballocIt)
5338  {
5339  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5340  suballocIt->hAllocation->CanBecomeLost())
5341  {
5342  if(CheckAllocation(
5343  currentFrameIndex,
5344  frameInUseCount,
5345  bufferImageGranularity,
5346  allocSize,
5347  allocAlignment,
5348  allocType,
5349  suballocIt,
5350  canMakeOtherLost,
5351  &tmpAllocRequest.offset,
5352  &tmpAllocRequest.itemsToMakeLostCount,
5353  &tmpAllocRequest.sumFreeSize,
5354  &tmpAllocRequest.sumItemSize))
5355  {
5356  tmpAllocRequest.item = suballocIt;
5357 
5358  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5359  {
5360  *pAllocationRequest = tmpAllocRequest;
5361  }
5362  }
5363  }
5364  }
5365 
5366  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5367  {
5368  return true;
5369  }
5370  }
5371 
5372  return false;
5373 }
5374 
5375 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5376  uint32_t currentFrameIndex,
5377  uint32_t frameInUseCount,
5378  VmaAllocationRequest* pAllocationRequest)
5379 {
5380  while(pAllocationRequest->itemsToMakeLostCount > 0)
5381  {
5382  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5383  {
5384  ++pAllocationRequest->item;
5385  }
5386  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5387  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5388  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5389  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5390  {
5391  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5392  --pAllocationRequest->itemsToMakeLostCount;
5393  }
5394  else
5395  {
5396  return false;
5397  }
5398  }
5399 
5400  VMA_HEAVY_ASSERT(Validate());
5401  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5402  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5403 
5404  return true;
5405 }
5406 
5407 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5408 {
5409  uint32_t lostAllocationCount = 0;
5410  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5411  it != m_Suballocations.end();
5412  ++it)
5413  {
5414  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5415  it->hAllocation->CanBecomeLost() &&
5416  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5417  {
5418  it = FreeSuballocation(it);
5419  ++lostAllocationCount;
5420  }
5421  }
5422  return lostAllocationCount;
5423 }
5424 
5425 void VmaBlockMetadata::Alloc(
5426  const VmaAllocationRequest& request,
5427  VmaSuballocationType type,
5428  VkDeviceSize allocSize,
5429  VmaAllocation hAllocation)
5430 {
5431  VMA_ASSERT(request.item != m_Suballocations.end());
5432  VmaSuballocation& suballoc = *request.item;
5433  // The given suballocation must be free.
5434  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5435  // Given offset is inside this suballocation.
5436  VMA_ASSERT(request.offset >= suballoc.offset);
5437  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5438  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5439  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5440 
5441  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5442  // it to become used.
5443  UnregisterFreeSuballocation(request.item);
5444 
5445  suballoc.offset = request.offset;
5446  suballoc.size = allocSize;
5447  suballoc.type = type;
5448  suballoc.hAllocation = hAllocation;
5449 
5450  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5451  if(paddingEnd)
5452  {
5453  VmaSuballocation paddingSuballoc = {};
5454  paddingSuballoc.offset = request.offset + allocSize;
5455  paddingSuballoc.size = paddingEnd;
5456  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5457  VmaSuballocationList::iterator next = request.item;
5458  ++next;
5459  const VmaSuballocationList::iterator paddingEndItem =
5460  m_Suballocations.insert(next, paddingSuballoc);
5461  RegisterFreeSuballocation(paddingEndItem);
5462  }
5463 
5464  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5465  if(paddingBegin)
5466  {
5467  VmaSuballocation paddingSuballoc = {};
5468  paddingSuballoc.offset = request.offset - paddingBegin;
5469  paddingSuballoc.size = paddingBegin;
5470  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5471  const VmaSuballocationList::iterator paddingBeginItem =
5472  m_Suballocations.insert(request.item, paddingSuballoc);
5473  RegisterFreeSuballocation(paddingBeginItem);
5474  }
5475 
5476  // Update totals.
5477  --m_FreeCount;
5478  if(paddingBegin > 0)
5479  {
5480  ++m_FreeCount;
5481  }
5482  if(paddingEnd > 0)
5483  {
5484  ++m_FreeCount;
5485  }
5486  m_SumFreeSize -= allocSize;
5487 }
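/*
Alloc() above splits one free suballocation into up to three ranges:
[paddingBegin][allocation][paddingEnd], registering the non-empty paddings as
new free suballocations. A self-contained sketch of the arithmetic
(illustrative values only):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t freeOffset = 100, freeSize = 300;  // free range [100, 400)
        const uint64_t reqOffset = 128, allocSize = 200;  // aligned request
        const uint64_t paddingBegin = reqOffset - freeOffset;             // 28
        const uint64_t paddingEnd = freeSize - paddingBegin - allocSize;  // 72
        assert(paddingBegin + allocSize + paddingEnd == freeSize);
        return 0;
    }
*/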
5488 
5489 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5490 {
5491  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5492  suballocItem != m_Suballocations.end();
5493  ++suballocItem)
5494  {
5495  VmaSuballocation& suballoc = *suballocItem;
5496  if(suballoc.hAllocation == allocation)
5497  {
5498  FreeSuballocation(suballocItem);
5499  VMA_HEAVY_ASSERT(Validate());
5500  return;
5501  }
5502  }
5503  VMA_ASSERT(0 && "Not found!");
5504 }
5505 
5506 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5507 {
5508  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5509  suballocItem != m_Suballocations.end();
5510  ++suballocItem)
5511  {
5512  VmaSuballocation& suballoc = *suballocItem;
5513  if(suballoc.offset == offset)
5514  {
5515  FreeSuballocation(suballocItem);
5516  return;
5517  }
5518  }
5519  VMA_ASSERT(0 && "Not found!");
5520 }
5521 
5522 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5523 {
5524  VkDeviceSize lastSize = 0;
5525  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5526  {
5527  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5528 
5529  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5530  {
5531  VMA_ASSERT(0);
5532  return false;
5533  }
5534  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5535  {
5536  VMA_ASSERT(0);
5537  return false;
5538  }
5539  if(it->size < lastSize)
5540  {
5541  VMA_ASSERT(0);
5542  return false;
5543  }
5544 
5545  lastSize = it->size;
5546  }
5547  return true;
5548 }
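/*
The invariant checked above: every entry of m_FreeSuballocationsBySize points
to a free suballocation of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER
bytes, and the vector is sorted by size in ascending order. An equivalent check
on a plain vector of sizes (a sketch, not library code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    bool FreeListSizesValid(const std::vector<uint64_t>& sizes, uint64_t minSize)
    {
        return std::all_of(sizes.begin(), sizes.end(),
                [minSize](uint64_t s) { return s >= minSize; }) &&
            std::is_sorted(sizes.begin(), sizes.end());
    }
*/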
5549 
5550 bool VmaBlockMetadata::CheckAllocation(
5551  uint32_t currentFrameIndex,
5552  uint32_t frameInUseCount,
5553  VkDeviceSize bufferImageGranularity,
5554  VkDeviceSize allocSize,
5555  VkDeviceSize allocAlignment,
5556  VmaSuballocationType allocType,
5557  VmaSuballocationList::const_iterator suballocItem,
5558  bool canMakeOtherLost,
5559  VkDeviceSize* pOffset,
5560  size_t* itemsToMakeLostCount,
5561  VkDeviceSize* pSumFreeSize,
5562  VkDeviceSize* pSumItemSize) const
5563 {
5564  VMA_ASSERT(allocSize > 0);
5565  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5566  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5567  VMA_ASSERT(pOffset != VMA_NULL);
5568 
5569  *itemsToMakeLostCount = 0;
5570  *pSumFreeSize = 0;
5571  *pSumItemSize = 0;
5572 
5573  if(canMakeOtherLost)
5574  {
5575  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5576  {
5577  *pSumFreeSize = suballocItem->size;
5578  }
5579  else
5580  {
5581  if(suballocItem->hAllocation->CanBecomeLost() &&
5582  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5583  {
5584  ++*itemsToMakeLostCount;
5585  *pSumItemSize = suballocItem->size;
5586  }
5587  else
5588  {
5589  return false;
5590  }
5591  }
5592 
5593  // Remaining size is too small for this request: Early return.
5594  if(m_Size - suballocItem->offset < allocSize)
5595  {
5596  return false;
5597  }
5598 
5599  // Start from offset equal to beginning of this suballocation.
5600  *pOffset = suballocItem->offset;
5601 
5602  // Apply VMA_DEBUG_MARGIN at the beginning.
5603  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5604  {
5605  *pOffset += VMA_DEBUG_MARGIN;
5606  }
5607 
5608  // Apply alignment.
5609  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5610  *pOffset = VmaAlignUp(*pOffset, alignment);
5611 
5612  // Check previous suballocations for BufferImageGranularity conflicts.
5613  // Make bigger alignment if necessary.
5614  if(bufferImageGranularity > 1)
5615  {
5616  bool bufferImageGranularityConflict = false;
5617  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5618  while(prevSuballocItem != m_Suballocations.cbegin())
5619  {
5620  --prevSuballocItem;
5621  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5622  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5623  {
5624  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5625  {
5626  bufferImageGranularityConflict = true;
5627  break;
5628  }
5629  }
5630  else
5631  // Already on previous page.
5632  break;
5633  }
5634  if(bufferImageGranularityConflict)
5635  {
5636  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5637  }
5638  }
5639 
5640  // Now that we have final *pOffset, check if we are past suballocItem.
5641  // If yes, return false - this function should be called for another suballocItem as starting point.
5642  if(*pOffset >= suballocItem->offset + suballocItem->size)
5643  {
5644  return false;
5645  }
5646 
5647  // Calculate padding at the beginning based on current offset.
5648  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5649 
5650  // Calculate required margin at the end if this is not last suballocation.
5651  VmaSuballocationList::const_iterator next = suballocItem;
5652  ++next;
5653  const VkDeviceSize requiredEndMargin =
5654  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5655 
5656  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5657  // Another early return check.
5658  if(suballocItem->offset + totalSize > m_Size)
5659  {
5660  return false;
5661  }
5662 
5663  // Advance lastSuballocItem until desired size is reached.
5664  // Update itemsToMakeLostCount.
5665  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5666  if(totalSize > suballocItem->size)
5667  {
5668  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5669  while(remainingSize > 0)
5670  {
5671  ++lastSuballocItem;
5672  if(lastSuballocItem == m_Suballocations.cend())
5673  {
5674  return false;
5675  }
5676  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5677  {
5678  *pSumFreeSize += lastSuballocItem->size;
5679  }
5680  else
5681  {
5682  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5683  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5684  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5685  {
5686  ++*itemsToMakeLostCount;
5687  *pSumItemSize += lastSuballocItem->size;
5688  }
5689  else
5690  {
5691  return false;
5692  }
5693  }
5694  remainingSize = (lastSuballocItem->size < remainingSize) ?
5695  remainingSize - lastSuballocItem->size : 0;
5696  }
5697  }
5698 
5699  // Check next suballocations for BufferImageGranularity conflicts.
5700  // If conflict exists, we must mark more allocations lost or fail.
5701  if(bufferImageGranularity > 1)
5702  {
5703  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5704  ++nextSuballocItem;
5705  while(nextSuballocItem != m_Suballocations.cend())
5706  {
5707  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5708  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5709  {
5710  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5711  {
5712  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5713  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5714  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5715  {
5716  ++*itemsToMakeLostCount;
5717  }
5718  else
5719  {
5720  return false;
5721  }
5722  }
5723  }
5724  else
5725  {
5726  // Already on next page.
5727  break;
5728  }
5729  ++nextSuballocItem;
5730  }
5731  }
5732  }
5733  else
5734  {
5735  const VmaSuballocation& suballoc = *suballocItem;
5736  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5737 
5738  *pSumFreeSize = suballoc.size;
5739 
5740  // Size of this suballocation is too small for this request: Early return.
5741  if(suballoc.size < allocSize)
5742  {
5743  return false;
5744  }
5745 
5746  // Start from offset equal to beginning of this suballocation.
5747  *pOffset = suballoc.offset;
5748 
5749  // Apply VMA_DEBUG_MARGIN at the beginning.
5750  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5751  {
5752  *pOffset += VMA_DEBUG_MARGIN;
5753  }
5754 
5755  // Apply alignment.
5756  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5757  *pOffset = VmaAlignUp(*pOffset, alignment);
5758 
5759  // Check previous suballocations for BufferImageGranularity conflicts.
5760  // Make bigger alignment if necessary.
5761  if(bufferImageGranularity > 1)
5762  {
5763  bool bufferImageGranularityConflict = false;
5764  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5765  while(prevSuballocItem != m_Suballocations.cbegin())
5766  {
5767  --prevSuballocItem;
5768  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5769  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5770  {
5771  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5772  {
5773  bufferImageGranularityConflict = true;
5774  break;
5775  }
5776  }
5777  else
5778  // Already on previous page.
5779  break;
5780  }
5781  if(bufferImageGranularityConflict)
5782  {
5783  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5784  }
5785  }
5786 
5787  // Calculate padding at the beginning based on current offset.
5788  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5789 
5790  // Calculate required margin at the end if this is not last suballocation.
5791  VmaSuballocationList::const_iterator next = suballocItem;
5792  ++next;
5793  const VkDeviceSize requiredEndMargin =
5794  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5795 
5796  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5797  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5798  {
5799  return false;
5800  }
5801 
5802  // Check next suballocations for BufferImageGranularity conflicts.
5803  // If conflict exists, allocation cannot be made here.
5804  if(bufferImageGranularity > 1)
5805  {
5806  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5807  ++nextSuballocItem;
5808  while(nextSuballocItem != m_Suballocations.cend())
5809  {
5810  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5811  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5812  {
5813  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5814  {
5815  return false;
5816  }
5817  }
5818  else
5819  {
5820  // Already on next page.
5821  break;
5822  }
5823  ++nextSuballocItem;
5824  }
5825  }
5826  }
5827 
5828  // All tests passed: Success. pOffset is already filled.
5829  return true;
5830 }
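/*
The offsets computed in CheckAllocation() are rounded up with VmaAlignUp(), and
bufferImageGranularity acts like a page size that linear and optimal resources
must not share. A worked example of the rounding (assuming VmaAlignUp rounds up
to the nearest multiple of the alignment, as defined earlier in this file):

    #include <cassert>
    #include <cstdint>

    static uint64_t AlignUp(uint64_t val, uint64_t align)
    {
        return (val + align - 1) / align * align;
    }

    int main()
    {
        assert(AlignUp(100, 64) == 128);    // plain alignment
        // With bufferImageGranularity = 1024, a conflicting neighbor on the
        // same "page" forces the offset up to the next page boundary:
        assert(AlignUp(128, 1024) == 1024);
        return 0;
    }
*/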
5831 
5832 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5833 {
5834  VMA_ASSERT(item != m_Suballocations.end());
5835  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5836 
5837  VmaSuballocationList::iterator nextItem = item;
5838  ++nextItem;
5839  VMA_ASSERT(nextItem != m_Suballocations.end());
5840  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5841 
5842  item->size += nextItem->size;
5843  --m_FreeCount;
5844  m_Suballocations.erase(nextItem);
5845 }
5846 
5847 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5848 {
5849  // Change this suballocation to be marked as free.
5850  VmaSuballocation& suballoc = *suballocItem;
5851  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5852  suballoc.hAllocation = VK_NULL_HANDLE;
5853 
5854  // Update totals.
5855  ++m_FreeCount;
5856  m_SumFreeSize += suballoc.size;
5857 
5858  // Merge with previous and/or next suballocation if it's also free.
5859  bool mergeWithNext = false;
5860  bool mergeWithPrev = false;
5861 
5862  VmaSuballocationList::iterator nextItem = suballocItem;
5863  ++nextItem;
5864  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5865  {
5866  mergeWithNext = true;
5867  }
5868 
5869  VmaSuballocationList::iterator prevItem = suballocItem;
5870  if(suballocItem != m_Suballocations.begin())
5871  {
5872  --prevItem;
5873  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5874  {
5875  mergeWithPrev = true;
5876  }
5877  }
5878 
5879  if(mergeWithNext)
5880  {
5881  UnregisterFreeSuballocation(nextItem);
5882  MergeFreeWithNext(suballocItem);
5883  }
5884 
5885  if(mergeWithPrev)
5886  {
5887  UnregisterFreeSuballocation(prevItem);
5888  MergeFreeWithNext(prevItem);
5889  RegisterFreeSuballocation(prevItem);
5890  return prevItem;
5891  }
5892  else
5893  {
5894  RegisterFreeSuballocation(suballocItem);
5895  return suballocItem;
5896  }
5897 }
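/*
Because FreeSuballocation() always merges a newly freed range with a free
neighbor on either side, the suballocation list never contains two adjacent
free items - which is why at most one merge per direction is needed.
Schematically:

    before: [used][FREE][freed][FREE][used]
    after:  [used][    one FREE range   ][used]
*/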
5898 
5899 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5900 {
5901  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5902  VMA_ASSERT(item->size > 0);
5903 
5904  // You may want to enable this validation at the beginning or at the end of
5905  // this function, depending on what you want to check.
5906  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5907 
5908  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5909  {
5910  if(m_FreeSuballocationsBySize.empty())
5911  {
5912  m_FreeSuballocationsBySize.push_back(item);
5913  }
5914  else
5915  {
5916  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5917  }
5918  }
5919 
5920  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5921 }
5922 
5923 
5924 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5925 {
5926  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5927  VMA_ASSERT(item->size > 0);
5928 
5929  // You may want to enable this validation at the beginning or at the end of
5930  // this function, depending on what you want to check.
5931  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5932 
5933  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5934  {
5935  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5936  m_FreeSuballocationsBySize.data(),
5937  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5938  item,
5939  VmaSuballocationItemSizeLess());
5940  for(size_t index = it - m_FreeSuballocationsBySize.data();
5941  index < m_FreeSuballocationsBySize.size();
5942  ++index)
5943  {
5944  if(m_FreeSuballocationsBySize[index] == item)
5945  {
5946  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5947  return;
5948  }
5949  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5950  }
5951  VMA_ASSERT(0 && "Not found.");
5952  }
5953 
5954  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5955 }
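/*
UnregisterFreeSuballocation() first binary-searches m_FreeSuballocationsBySize
for the first entry whose size is not less than item->size, then scans forward
through the run of equal-sized entries to find the exact iterator. The same
idea on a plain sorted vector, using the standard library (a sketch):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    bool RemoveOneSorted(std::vector<uint64_t>& sorted, uint64_t value)
    {
        // All candidates with equal value form a contiguous run.
        std::vector<uint64_t>::iterator it =
            std::lower_bound(sorted.begin(), sorted.end(), value);
        if(it != sorted.end() && *it == value)
        {
            sorted.erase(it);
            return true;
        }
        return false;
    }
*/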
5956 
5958 // class VmaDeviceMemoryMapping
5959 
5960 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5961  m_MapCount(0),
5962  m_pMappedData(VMA_NULL)
5963 {
5964 }
5965 
5966 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5967 {
5968  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5969 }
5970 
5971 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
5972 {
5973  if(count == 0)
5974  {
5975  return VK_SUCCESS;
5976  }
5977 
5978  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5979  if(m_MapCount != 0)
5980  {
5981  m_MapCount += count;
5982  VMA_ASSERT(m_pMappedData != VMA_NULL);
5983  if(ppData != VMA_NULL)
5984  {
5985  *ppData = m_pMappedData;
5986  }
5987  return VK_SUCCESS;
5988  }
5989  else
5990  {
5991  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5992  hAllocator->m_hDevice,
5993  hMemory,
5994  0, // offset
5995  VK_WHOLE_SIZE,
5996  0, // flags
5997  &m_pMappedData);
5998  if(result == VK_SUCCESS)
5999  {
6000  if(ppData != VMA_NULL)
6001  {
6002  *ppData = m_pMappedData;
6003  }
6004  m_MapCount = count;
6005  }
6006  return result;
6007  }
6008 }
6009 
6010 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6011 {
6012  if(count == 0)
6013  {
6014  return;
6015  }
6016 
6017  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6018  if(m_MapCount >= count)
6019  {
6020  m_MapCount -= count;
6021  if(m_MapCount == 0)
6022  {
6023  m_pMappedData = VMA_NULL;
6024  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6025  }
6026  }
6027  else
6028  {
6029  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6030  }
6031 }
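/*
Map() and Unmap() above are reference-counted: vkMapMemory() is called only on
the 0 -> 1 transition and vkUnmapMemory() only on the 1 -> 0 transition, so
nested mappings of the same VkDeviceMemory are cheap. A usage sketch under that
assumption (mapping, hAllocator and hMemory are placeholder variables):

    void* pData = VMA_NULL;
    mapping.Map(hAllocator, hMemory, 1, &pData); // calls vkMapMemory
    mapping.Map(hAllocator, hMemory, 1, &pData); // only bumps the counter
    mapping.Unmap(hAllocator, hMemory, 1);       // counter 2 -> 1, still mapped
    mapping.Unmap(hAllocator, hMemory, 1);       // counter 1 -> 0, vkUnmapMemory
*/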
6032 
6034 // class VmaDeviceMemoryBlock
6035 
6036 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6037  m_MemoryTypeIndex(UINT32_MAX),
6038  m_hMemory(VK_NULL_HANDLE),
6039  m_Metadata(hAllocator)
6040 {
6041 }
6042 
6043 void VmaDeviceMemoryBlock::Init(
6044  uint32_t newMemoryTypeIndex,
6045  VkDeviceMemory newMemory,
6046  VkDeviceSize newSize)
6047 {
6048  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6049 
6050  m_MemoryTypeIndex = newMemoryTypeIndex;
6051  m_hMemory = newMemory;
6052 
6053  m_Metadata.Init(newSize);
6054 }
6055 
6056 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6057 {
6058  // This is the most important assert in the entire library.
6059  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6060  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6061 
6062  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6063  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6064  m_hMemory = VK_NULL_HANDLE;
6065 }
6066 
6067 bool VmaDeviceMemoryBlock::Validate() const
6068 {
6069  if((m_hMemory == VK_NULL_HANDLE) ||
6070  (m_Metadata.GetSize() == 0))
6071  {
6072  return false;
6073  }
6074 
6075  return m_Metadata.Validate();
6076 }
6077 
6078 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6079 {
6080  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6081 }
6082 
6083 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6084 {
6085  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6086 }
6087 
6088 static void InitStatInfo(VmaStatInfo& outInfo)
6089 {
6090  memset(&outInfo, 0, sizeof(outInfo));
6091  outInfo.allocationSizeMin = UINT64_MAX;
6092  outInfo.unusedRangeSizeMin = UINT64_MAX;
6093 }
6094 
6095 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6096 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6097 {
6098  inoutInfo.blockCount += srcInfo.blockCount;
6099  inoutInfo.allocationCount += srcInfo.allocationCount;
6100  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6101  inoutInfo.usedBytes += srcInfo.usedBytes;
6102  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6103  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6104  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6105  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6106  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6107 }
6108 
6109 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6110 {
6111  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6112  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6113  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6114  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6115 }
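/*
The three helpers above follow the usual init/accumulate/finalize pattern for
statistics: minimums start at UINT64_MAX so that VMA_MIN() works on the first
sample, sums are accumulated with VmaAddStatInfo(), and averages are derived at
the end. A sketch of the intended call sequence (perBlockStats is an assumed
input range, not a library symbol):

    VmaStatInfo total;
    InitStatInfo(total);                           // zeros; mins = UINT64_MAX
    for(const VmaStatInfo& block : perBlockStats)
        VmaAddStatInfo(total, block);              // total += block
    VmaPostprocessCalcStatInfo(total);             // fills allocationSizeAvg etc.
*/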
6116 
6117 VmaPool_T::VmaPool_T(
6118  VmaAllocator hAllocator,
6119  const VmaPoolCreateInfo& createInfo) :
6120  m_BlockVector(
6121  hAllocator,
6122  createInfo.memoryTypeIndex,
6123  createInfo.blockSize,
6124  createInfo.minBlockCount,
6125  createInfo.maxBlockCount,
6126  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6127  createInfo.frameInUseCount,
6128  true) // isCustomPool
6129 {
6130 }
6131 
6132 VmaPool_T::~VmaPool_T()
6133 {
6134 }
6135 
6136 #if VMA_STATS_STRING_ENABLED
6137 
6138 #endif // #if VMA_STATS_STRING_ENABLED
6139 
6140 VmaBlockVector::VmaBlockVector(
6141  VmaAllocator hAllocator,
6142  uint32_t memoryTypeIndex,
6143  VkDeviceSize preferredBlockSize,
6144  size_t minBlockCount,
6145  size_t maxBlockCount,
6146  VkDeviceSize bufferImageGranularity,
6147  uint32_t frameInUseCount,
6148  bool isCustomPool) :
6149  m_hAllocator(hAllocator),
6150  m_MemoryTypeIndex(memoryTypeIndex),
6151  m_PreferredBlockSize(preferredBlockSize),
6152  m_MinBlockCount(minBlockCount),
6153  m_MaxBlockCount(maxBlockCount),
6154  m_BufferImageGranularity(bufferImageGranularity),
6155  m_FrameInUseCount(frameInUseCount),
6156  m_IsCustomPool(isCustomPool),
6157  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6158  m_HasEmptyBlock(false),
6159  m_pDefragmentator(VMA_NULL)
6160 {
6161 }
6162 
6163 VmaBlockVector::~VmaBlockVector()
6164 {
6165  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6166 
6167  for(size_t i = m_Blocks.size(); i--; )
6168  {
6169  m_Blocks[i]->Destroy(m_hAllocator);
6170  vma_delete(m_hAllocator, m_Blocks[i]);
6171  }
6172 }
6173 
6174 VkResult VmaBlockVector::CreateMinBlocks()
6175 {
6176  for(size_t i = 0; i < m_MinBlockCount; ++i)
6177  {
6178  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6179  if(res != VK_SUCCESS)
6180  {
6181  return res;
6182  }
6183  }
6184  return VK_SUCCESS;
6185 }
6186 
6187 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6188 {
6189  pStats->size = 0;
6190  pStats->unusedSize = 0;
6191  pStats->allocationCount = 0;
6192  pStats->unusedRangeCount = 0;
6193  pStats->unusedRangeSizeMax = 0;
6194 
6195  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6196 
6197  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6198  {
6199  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6200  VMA_ASSERT(pBlock);
6201  VMA_HEAVY_ASSERT(pBlock->Validate());
6202  pBlock->m_Metadata.AddPoolStats(*pStats);
6203  }
6204 }
6205 
6206 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6207 
6208 VkResult VmaBlockVector::Allocate(
6209  VmaPool hCurrentPool,
6210  uint32_t currentFrameIndex,
6211  const VkMemoryRequirements& vkMemReq,
6212  const VmaAllocationCreateInfo& createInfo,
6213  VmaSuballocationType suballocType,
6214  VmaAllocation* pAllocation)
6215 {
6216  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6217  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6218 
6219  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6220 
6221  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6222  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6223  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6224  {
6225  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6226  VMA_ASSERT(pCurrBlock);
6227  VmaAllocationRequest currRequest = {};
6228  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6229  currentFrameIndex,
6230  m_FrameInUseCount,
6231  m_BufferImageGranularity,
6232  vkMemReq.size,
6233  vkMemReq.alignment,
6234  suballocType,
6235  false, // canMakeOtherLost
6236  &currRequest))
6237  {
6238  // Allocate from pCurrBlock.
6239  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6240 
6241  if(mapped)
6242  {
6243  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6244  if(res != VK_SUCCESS)
6245  {
6246  return res;
6247  }
6248  }
6249 
6250  // We no longer have an empty block.
6251  if(pCurrBlock->m_Metadata.IsEmpty())
6252  {
6253  m_HasEmptyBlock = false;
6254  }
6255 
6256  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6257  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6258  (*pAllocation)->InitBlockAllocation(
6259  hCurrentPool,
6260  pCurrBlock,
6261  currRequest.offset,
6262  vkMemReq.alignment,
6263  vkMemReq.size,
6264  suballocType,
6265  mapped,
6266  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6267  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6268  VMA_DEBUG_LOG("    Returned from existing block #%u", (uint32_t)blockIndex);
6269  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6270  return VK_SUCCESS;
6271  }
6272  }
6273 
6274  const bool canCreateNewBlock =
6275  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6276  (m_Blocks.size() < m_MaxBlockCount);
6277 
6278  // 2. Try to create new block.
6279  if(canCreateNewBlock)
6280  {
6281  // Calculate optimal size for new block.
6282  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6283  uint32_t newBlockSizeShift = 0;
6284  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6285 
6286  // Allocating blocks of other sizes is allowed only in default pools.
6287  // In custom pools block size is fixed.
6288  if(m_IsCustomPool == false)
6289  {
6290  // Allocate 1/8, 1/4, 1/2 as first blocks.
6291  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6292  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6293  {
6294  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6295  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6296  {
6297  newBlockSize = smallerNewBlockSize;
6298  ++newBlockSizeShift;
6299  }
6300  else
6301  {
6302  break;
6303  }
6304  }
6305  }
6306 
6307  size_t newBlockIndex = 0;
6308  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6309  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6310  if(m_IsCustomPool == false)
6311  {
6312  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6313  {
6314  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6315  if(smallerNewBlockSize >= vkMemReq.size)
6316  {
6317  newBlockSize = smallerNewBlockSize;
6318  ++newBlockSizeShift;
6319  res = CreateBlock(newBlockSize, &newBlockIndex);
6320  }
6321  else
6322  {
6323  break;
6324  }
6325  }
6326  }
6327 
6328  if(res == VK_SUCCESS)
6329  {
6330  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6331  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6332 
6333  if(mapped)
6334  {
6335  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6336  if(res != VK_SUCCESS)
6337  {
6338  return res;
6339  }
6340  }
6341 
6342  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6343  VmaAllocationRequest allocRequest;
6344  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6345  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6346  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6347  (*pAllocation)->InitBlockAllocation(
6348  hCurrentPool,
6349  pBlock,
6350  allocRequest.offset,
6351  vkMemReq.alignment,
6352  vkMemReq.size,
6353  suballocType,
6354  mapped,
6355  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6356  VMA_HEAVY_ASSERT(pBlock->Validate());
6357  VMA_DEBUG_LOG("    Created new block Size=%llu", newBlockSize);
6358  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6359  return VK_SUCCESS;
6360  }
6361  }
6362 
6363  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6364 
6365  // 3. Try to allocate from existing blocks with making other allocations lost.
6366  if(canMakeOtherLost)
6367  {
6368  uint32_t tryIndex = 0;
6369  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6370  {
6371  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6372  VmaAllocationRequest bestRequest = {};
6373  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6374 
6375  // 1. Search existing allocations.
6376  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6377  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6378  {
6379  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6380  VMA_ASSERT(pCurrBlock);
6381  VmaAllocationRequest currRequest = {};
6382  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6383  currentFrameIndex,
6384  m_FrameInUseCount,
6385  m_BufferImageGranularity,
6386  vkMemReq.size,
6387  vkMemReq.alignment,
6388  suballocType,
6389  canMakeOtherLost,
6390  &currRequest))
6391  {
6392  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6393  if(pBestRequestBlock == VMA_NULL ||
6394  currRequestCost < bestRequestCost)
6395  {
6396  pBestRequestBlock = pCurrBlock;
6397  bestRequest = currRequest;
6398  bestRequestCost = currRequestCost;
6399 
6400  if(bestRequestCost == 0)
6401  {
6402  break;
6403  }
6404  }
6405  }
6406  }
6407 
6408  if(pBestRequestBlock != VMA_NULL)
6409  {
6410  if(mapped)
6411  {
6412  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6413  if(res != VK_SUCCESS)
6414  {
6415  return res;
6416  }
6417  }
6418 
6419  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6420  currentFrameIndex,
6421  m_FrameInUseCount,
6422  &bestRequest))
6423  {
6424  // We no longer have an empty block.
6425  if(pBestRequestBlock->m_Metadata.IsEmpty())
6426  {
6427  m_HasEmptyBlock = false;
6428  }
6429  // Allocate from this pBlock.
6430  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6431  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6432  (*pAllocation)->InitBlockAllocation(
6433  hCurrentPool,
6434  pBestRequestBlock,
6435  bestRequest.offset,
6436  vkMemReq.alignment,
6437  vkMemReq.size,
6438  suballocType,
6439  mapped,
6440  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6441  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6442  VMA_DEBUG_LOG("    Returned from existing block");
6443  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6444  return VK_SUCCESS;
6445  }
6446  // else: Some allocations must have been touched in the meantime - next try.
6447  }
6448  else
6449  {
6450  // Could not find place in any of the blocks - break outer loop.
6451  break;
6452  }
6453  }
6454  /* Maximum number of tries exceeded - a very unlikely event when many other
6455  threads are simultaneously touching allocations, making it impossible to mark
6456  them as lost at the same time as we try to allocate. */
6457  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6458  {
6459  return VK_ERROR_TOO_MANY_OBJECTS;
6460  }
6461  }
6462 
6463  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6464 }
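/*
The block-size heuristic in Allocate() works in both directions: the first
blocks of a default pool are created at 1/8, 1/4, 1/2 of m_PreferredBlockSize,
and if vkAllocateMemory fails, the size is halved again, up to
NEW_BLOCK_SIZE_SHIFT_MAX halvings in total. A worked example with a 256 MiB
preferred block size (assuming each attempt still fits the request):

    attempt 1: 256 MiB  (preferred size)
    attempt 2: 128 MiB  (after first VK_ERROR_OUT_OF_DEVICE_MEMORY)
    attempt 3:  64 MiB
    attempt 4:  32 MiB  (shift limit NEW_BLOCK_SIZE_SHIFT_MAX = 3 reached)
*/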
6465 
6466 void VmaBlockVector::Free(
6467  VmaAllocation hAllocation)
6468 {
6469  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6470 
6471  // Scope for lock.
6472  {
6473  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6474 
6475  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6476 
6477  if(hAllocation->IsPersistentMap())
6478  {
6479  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6480  }
6481 
6482  pBlock->m_Metadata.Free(hAllocation);
6483  VMA_HEAVY_ASSERT(pBlock->Validate());
6484 
6485  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6486 
6487  // pBlock became empty after this deallocation.
6488  if(pBlock->m_Metadata.IsEmpty())
6489  {
6490  // We already have an empty block. We don't want to have two, so delete this one.
6491  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6492  {
6493  pBlockToDelete = pBlock;
6494  Remove(pBlock);
6495  }
6496  // We now have our first empty block.
6497  else
6498  {
6499  m_HasEmptyBlock = true;
6500  }
6501  }
6502  // pBlock didn't become empty, but we have another empty block - find and free that one.
6503  // (This is optional - a heuristic.)
6504  else if(m_HasEmptyBlock)
6505  {
6506  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6507  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6508  {
6509  pBlockToDelete = pLastBlock;
6510  m_Blocks.pop_back();
6511  m_HasEmptyBlock = false;
6512  }
6513  }
6514 
6515  IncrementallySortBlocks();
6516  }
6517 
6518  // Destruction of an empty block. Deferred until this point, outside of the
6519  // mutex lock, for performance reasons.
6520  if(pBlockToDelete != VMA_NULL)
6521  {
6522  VMA_DEBUG_LOG("    Deleted empty block");
6523  pBlockToDelete->Destroy(m_hAllocator);
6524  vma_delete(m_hAllocator, pBlockToDelete);
6525  }
6526 }
6527 
6528 size_t VmaBlockVector::CalcMaxBlockSize() const
6529 {
6530  size_t result = 0;
6531  for(size_t i = m_Blocks.size(); i--; )
6532  {
6533  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6534  if(result >= m_PreferredBlockSize)
6535  {
6536  break;
6537  }
6538  }
6539  return result;
6540 }
6541 
6542 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6543 {
6544  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6545  {
6546  if(m_Blocks[blockIndex] == pBlock)
6547  {
6548  VmaVectorRemove(m_Blocks, blockIndex);
6549  return;
6550  }
6551  }
6552  VMA_ASSERT(0);
6553 }
6554 
6555 void VmaBlockVector::IncrementallySortBlocks()
6556 {
6557  // Bubble sort only until first swap.
6558  for(size_t i = 1; i < m_Blocks.size(); ++i)
6559  {
6560  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6561  {
6562  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6563  return;
6564  }
6565  }
6566 }
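/*
IncrementallySortBlocks() amortizes sorting cost: each call performs at most
one adjacent swap of a bubble-sort pass, which keeps m_Blocks nearly sorted by
free space when it is invoked once per Free(). The same idea on a plain vector
(a sketch):

    #include <utility>
    #include <vector>

    // At most one adjacent swap per call; repeated calls converge to
    // ascending order.
    void IncrementallySort(std::vector<int>& v)
    {
        for(size_t i = 1; i < v.size(); ++i)
        {
            if(v[i - 1] > v[i])
            {
                std::swap(v[i - 1], v[i]);
                return;
            }
        }
    }
*/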
6567 
6568 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6569 {
6570  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6571  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6572  allocInfo.allocationSize = blockSize;
6573  VkDeviceMemory mem = VK_NULL_HANDLE;
6574  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6575  if(res < 0)
6576  {
6577  return res;
6578  }
6579 
6580  // New VkDeviceMemory successfully created.
6581 
6582  // Create a new block object for it.
6583  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6584  pBlock->Init(
6585  m_MemoryTypeIndex,
6586  mem,
6587  allocInfo.allocationSize);
6588 
6589  m_Blocks.push_back(pBlock);
6590  if(pNewBlockIndex != VMA_NULL)
6591  {
6592  *pNewBlockIndex = m_Blocks.size() - 1;
6593  }
6594 
6595  return VK_SUCCESS;
6596 }
6597 
6598 #if VMA_STATS_STRING_ENABLED
6599 
6600 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6601 {
6602  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6603 
6604  json.BeginObject();
6605 
6606  if(m_IsCustomPool)
6607  {
6608  json.WriteString("MemoryTypeIndex");
6609  json.WriteNumber(m_MemoryTypeIndex);
6610 
6611  json.WriteString("BlockSize");
6612  json.WriteNumber(m_PreferredBlockSize);
6613 
6614  json.WriteString("BlockCount");
6615  json.BeginObject(true);
6616  if(m_MinBlockCount > 0)
6617  {
6618  json.WriteString("Min");
6619  json.WriteNumber((uint64_t)m_MinBlockCount);
6620  }
6621  if(m_MaxBlockCount < SIZE_MAX)
6622  {
6623  json.WriteString("Max");
6624  json.WriteNumber((uint64_t)m_MaxBlockCount);
6625  }
6626  json.WriteString("Cur");
6627  json.WriteNumber((uint64_t)m_Blocks.size());
6628  json.EndObject();
6629 
6630  if(m_FrameInUseCount > 0)
6631  {
6632  json.WriteString("FrameInUseCount");
6633  json.WriteNumber(m_FrameInUseCount);
6634  }
6635  }
6636  else
6637  {
6638  json.WriteString("PreferredBlockSize");
6639  json.WriteNumber(m_PreferredBlockSize);
6640  }
6641 
6642  json.WriteString("Blocks");
6643  json.BeginArray();
6644  for(size_t i = 0; i < m_Blocks.size(); ++i)
6645  {
6646  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6647  }
6648  json.EndArray();
6649 
6650  json.EndObject();
6651 }
6652 
6653 #endif // #if VMA_STATS_STRING_ENABLED
6654 
6655 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6656  VmaAllocator hAllocator,
6657  uint32_t currentFrameIndex)
6658 {
6659  if(m_pDefragmentator == VMA_NULL)
6660  {
6661  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6662  hAllocator,
6663  this,
6664  currentFrameIndex);
6665  }
6666 
6667  return m_pDefragmentator;
6668 }
6669 
6670 VkResult VmaBlockVector::Defragment(
6671  VmaDefragmentationStats* pDefragmentationStats,
6672  VkDeviceSize& maxBytesToMove,
6673  uint32_t& maxAllocationsToMove)
6674 {
6675  if(m_pDefragmentator == VMA_NULL)
6676  {
6677  return VK_SUCCESS;
6678  }
6679 
6680  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6681 
6682  // Defragment.
6683  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6684 
6685  // Accumulate statistics.
6686  if(pDefragmentationStats != VMA_NULL)
6687  {
6688  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6689  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6690  pDefragmentationStats->bytesMoved += bytesMoved;
6691  pDefragmentationStats->allocationsMoved += allocationsMoved;
6692  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6693  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6694  maxBytesToMove -= bytesMoved;
6695  maxAllocationsToMove -= allocationsMoved;
6696  }
6697 
6698  // Free empty blocks.
6699  m_HasEmptyBlock = false;
6700  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6701  {
6702  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6703  if(pBlock->m_Metadata.IsEmpty())
6704  {
6705  if(m_Blocks.size() > m_MinBlockCount)
6706  {
6707  if(pDefragmentationStats != VMA_NULL)
6708  {
6709  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6710  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6711  }
6712 
6713  VmaVectorRemove(m_Blocks, blockIndex);
6714  pBlock->Destroy(m_hAllocator);
6715  vma_delete(m_hAllocator, pBlock);
6716  }
6717  else
6718  {
6719  m_HasEmptyBlock = true;
6720  }
6721  }
6722  }
6723 
6724  return result;
6725 }
6726 
6727 void VmaBlockVector::DestroyDefragmentator()
6728 {
6729  if(m_pDefragmentator != VMA_NULL)
6730  {
6731  vma_delete(m_hAllocator, m_pDefragmentator);
6732  m_pDefragmentator = VMA_NULL;
6733  }
6734 }
6735 
6736 void VmaBlockVector::MakePoolAllocationsLost(
6737  uint32_t currentFrameIndex,
6738  size_t* pLostAllocationCount)
6739 {
6740  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6741  size_t lostAllocationCount = 0;
6742  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6743  {
6744  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6745  VMA_ASSERT(pBlock);
6746  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6747  }
6748  if(pLostAllocationCount != VMA_NULL)
6749  {
6750  *pLostAllocationCount = lostAllocationCount;
6751  }
6752 }
6753 
6754 void VmaBlockVector::AddStats(VmaStats* pStats)
6755 {
6756  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6757  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6758 
6759  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6760 
6761  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6762  {
6763  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6764  VMA_ASSERT(pBlock);
6765  VMA_HEAVY_ASSERT(pBlock->Validate());
6766  VmaStatInfo allocationStatInfo;
6767  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6768  VmaAddStatInfo(pStats->total, allocationStatInfo);
6769  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6770  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6771  }
6772 }
6773 
6775 // VmaDefragmentator members definition
6776 
6777 VmaDefragmentator::VmaDefragmentator(
6778  VmaAllocator hAllocator,
6779  VmaBlockVector* pBlockVector,
6780  uint32_t currentFrameIndex) :
6781  m_hAllocator(hAllocator),
6782  m_pBlockVector(pBlockVector),
6783  m_CurrentFrameIndex(currentFrameIndex),
6784  m_BytesMoved(0),
6785  m_AllocationsMoved(0),
6786  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6787  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6788 {
6789 }
6790 
6791 VmaDefragmentator::~VmaDefragmentator()
6792 {
6793  for(size_t i = m_Blocks.size(); i--; )
6794  {
6795  vma_delete(m_hAllocator, m_Blocks[i]);
6796  }
6797 }
6798 
6799 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6800 {
6801  AllocationInfo allocInfo;
6802  allocInfo.m_hAllocation = hAlloc;
6803  allocInfo.m_pChanged = pChanged;
6804  m_Allocations.push_back(allocInfo);
6805 }
6806 
6807 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6808 {
6809  // It has already been mapped for defragmentation.
6810  if(m_pMappedDataForDefragmentation)
6811  {
6812  *ppMappedData = m_pMappedDataForDefragmentation;
6813  return VK_SUCCESS;
6814  }
6815 
6816  // The block is already persistently mapped.
6817  if(m_pBlock->m_Mapping.GetMappedData())
6818  {
6819  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6820  return VK_SUCCESS;
6821  }
6822 
6823  // Map on first usage.
6824  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6825  *ppMappedData = m_pMappedDataForDefragmentation;
6826  return res;
6827 }
6828 
6829 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6830 {
6831  if(m_pMappedDataForDefragmentation != VMA_NULL)
6832  {
6833  m_pBlock->Unmap(hAllocator, 1);
6834  }
6835 }
6836 
6837 VkResult VmaDefragmentator::DefragmentRound(
6838  VkDeviceSize maxBytesToMove,
6839  uint32_t maxAllocationsToMove)
6840 {
6841  if(m_Blocks.empty())
6842  {
6843  return VK_SUCCESS;
6844  }
6845 
6846  size_t srcBlockIndex = m_Blocks.size() - 1;
6847  size_t srcAllocIndex = SIZE_MAX;
6848  for(;;)
6849  {
6850  // 1. Find next allocation to move.
6851  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6852  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6853  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6854  {
6855  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6856  {
6857  // Finished: no more allocations to process.
6858  if(srcBlockIndex == 0)
6859  {
6860  return VK_SUCCESS;
6861  }
6862  else
6863  {
6864  --srcBlockIndex;
6865  srcAllocIndex = SIZE_MAX;
6866  }
6867  }
6868  else
6869  {
6870  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6871  }
6872  }
6873 
6874  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6875  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6876 
6877  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6878  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6879  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6880  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6881 
6882  // 2. Try to find new place for this allocation in preceding or current block.
6883  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6884  {
6885  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6886  VmaAllocationRequest dstAllocRequest;
6887  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6888  m_CurrentFrameIndex,
6889  m_pBlockVector->GetFrameInUseCount(),
6890  m_pBlockVector->GetBufferImageGranularity(),
6891  size,
6892  alignment,
6893  suballocType,
6894  false, // canMakeOtherLost
6895  &dstAllocRequest) &&
6896  MoveMakesSense(
6897  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6898  {
6899  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6900 
6901  // Reached limit on number of allocations or bytes to move.
6902  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6903  (m_BytesMoved + size > maxBytesToMove))
6904  {
6905  return VK_INCOMPLETE;
6906  }
6907 
6908  void* pDstMappedData = VMA_NULL;
6909  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6910  if(res != VK_SUCCESS)
6911  {
6912  return res;
6913  }
6914 
6915  void* pSrcMappedData = VMA_NULL;
6916  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6917  if(res != VK_SUCCESS)
6918  {
6919  return res;
6920  }
6921 
6922  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6923  memcpy(
6924  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6925  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6926  static_cast<size_t>(size));
6927 
6928  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6929  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
6930 
6931  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6932 
6933  if(allocInfo.m_pChanged != VMA_NULL)
6934  {
6935  *allocInfo.m_pChanged = VK_TRUE;
6936  }
6937 
6938  ++m_AllocationsMoved;
6939  m_BytesMoved += size;
6940 
6941  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6942 
6943  break;
6944  }
6945  }
6946 
6947  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6948 
6949  if(srcAllocIndex > 0)
6950  {
6951  --srcAllocIndex;
6952  }
6953  else
6954  {
6955  if(srcBlockIndex > 0)
6956  {
6957  --srcBlockIndex;
6958  srcAllocIndex = SIZE_MAX;
6959  }
6960  else
6961  {
6962  return VK_SUCCESS;
6963  }
6964  }
6965  }
6966 }
6967 
6968 VkResult VmaDefragmentator::Defragment(
6969  VkDeviceSize maxBytesToMove,
6970  uint32_t maxAllocationsToMove)
6971 {
6972  if(m_Allocations.empty())
6973  {
6974  return VK_SUCCESS;
6975  }
6976 
6977  // Create block info for each block.
6978  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6979  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6980  {
6981  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6982  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6983  m_Blocks.push_back(pBlockInfo);
6984  }
6985 
6986  // Sort them by m_pBlock pointer value.
6987  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6988 
6989  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
6990  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6991  {
6992  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6993  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
6994  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6995  {
6996  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6997  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6998  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6999  {
7000  (*it)->m_Allocations.push_back(allocInfo);
7001  }
7002  else
7003  {
7004  VMA_ASSERT(0);
7005  }
7006  }
7007  }
7008  m_Allocations.clear();
7009 
7010  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7011  {
7012  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7013  pBlockInfo->CalcHasNonMovableAllocations();
7014  pBlockInfo->SortAllocationsBySizeDescecnding();
7015  }
7016 
7017  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7018  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7019 
7020  // Execute defragmentation rounds (the main part).
7021  VkResult result = VK_SUCCESS;
7022  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7023  {
7024  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7025  }
7026 
7027  // Unmap blocks that were mapped for defragmentation.
7028  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7029  {
7030  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7031  }
7032 
7033  return result;
7034 }
7035 
7036 bool VmaDefragmentator::MoveMakesSense(
7037  size_t dstBlockIndex, VkDeviceSize dstOffset,
7038  size_t srcBlockIndex, VkDeviceSize srcOffset)
7039 {
7040  if(dstBlockIndex < srcBlockIndex)
7041  {
7042  return true;
7043  }
7044  if(dstBlockIndex > srcBlockIndex)
7045  {
7046  return false;
7047  }
7048  if(dstOffset < srcOffset)
7049  {
7050  return true;
7051  }
7052  return false;
7053 }
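/*
MoveMakesSense() is a lexicographic "strictly earlier" test on the pair
(blockIndex, offset): a move is worthwhile only if it compacts data toward a
lower block, or toward a lower offset within the same block. An equivalent
formulation (a sketch):

    #include <cstdint>
    #include <tuple>

    bool MoveMakesSenseEquivalent(size_t dstBlock, uint64_t dstOffset,
                                  size_t srcBlock, uint64_t srcOffset)
    {
        return std::tie(dstBlock, dstOffset) < std::tie(srcBlock, srcOffset);
    }
*/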
7054 
7056 // VmaAllocator_T
7057 
7058 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7059  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7060  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7061  m_hDevice(pCreateInfo->device),
7062  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7063  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7064  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7065  m_PreferredLargeHeapBlockSize(0),
7066  m_PhysicalDevice(pCreateInfo->physicalDevice),
7067  m_CurrentFrameIndex(0),
7068  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7069 {
7070  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7071 
7072  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7073  memset(&m_MemProps, 0, sizeof(m_MemProps));
7074  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7075 
7076  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7077  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7078 
7079  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7080  {
7081  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7082  }
7083 
7084  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7085  {
7086  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7087  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7088  }
7089 
7090  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7091 
7092  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7093  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7094 
7095  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7096  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7097 
7098  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7099  {
7100  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7101  {
7102  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7103  if(limit != VK_WHOLE_SIZE)
7104  {
7105  m_HeapSizeLimit[heapIndex] = limit;
7106  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7107  {
7108  m_MemProps.memoryHeaps[heapIndex].size = limit;
7109  }
7110  }
7111  }
7112  }
7113 
7114  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7115  {
7116  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7117 
7118  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7119  this,
7120  memTypeIndex,
7121  preferredBlockSize,
7122  0,
7123  SIZE_MAX,
7124  GetBufferImageGranularity(),
7125  pCreateInfo->frameInUseCount,
7126  false); // isCustomPool
7127  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7128  // because minBlockCount is 0.
7129  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7130  }
7131 }
7132 
7133 VmaAllocator_T::~VmaAllocator_T()
7134 {
7135  VMA_ASSERT(m_Pools.empty());
7136 
7137  for(size_t i = GetMemoryTypeCount(); i--; )
7138  {
7139  vma_delete(this, m_pDedicatedAllocations[i]);
7140  vma_delete(this, m_pBlockVectors[i]);
7141  }
7142 }
7143 
7144 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7145 {
7146 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7147  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7148  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7149  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7150  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7151  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7152  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7153  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7154  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7155  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7156  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7157  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7158  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7159  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7160  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7161  if(m_UseKhrDedicatedAllocation)
7162  {
7163  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7164  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7165  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7166  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7167  }
7168 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7169 
7170 #define VMA_COPY_IF_NOT_NULL(funcName) \
7171  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7172 
7173  if(pVulkanFunctions != VMA_NULL)
7174  {
7175  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7176  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7177  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7178  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7179  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7180  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7181  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7182  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7183  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7184  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7185  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7186  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7187  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7188  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7189  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7190  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7191  }
7192 
7193 #undef VMA_COPY_IF_NOT_NULL
7194 
7195  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7196  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7197  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7198  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7199  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7200  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7201  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7202  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7203  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7204  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7205  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7206  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7207  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7208  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7209  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7210  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7211  if(m_UseKhrDedicatedAllocation)
7212  {
7213  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7214  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7215  }
7216 }
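/*
If the asserts above fire, the allocator was created without usable Vulkan
function pointers. A minimal sketch of supplying them explicitly (useful when
VMA_STATIC_VULKAN_FUNCTIONS is defined to 0; this assumes the application
still links the Vulkan loader so the global entry points are available,
and that physicalDevice/device were created elsewhere):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    vulkanFunctions.vkMapMemory = vkMapMemory;
    vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
    vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
    vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
    vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
    vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
    vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
    vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
    vulkanFunctions.vkCreateImage = vkCreateImage;
    vulkanFunctions.vkDestroyImage = vkDestroyImage;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/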
7217 
7218 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7219 {
7220  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7221  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7222  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7223  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7224 }
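/*
A worked example of the heuristic above, assuming the library defaults of
VMA_SMALL_HEAP_MAX_SIZE = 512 MiB and a preferred large-heap block size of
256 MiB: a 256 MiB heap counts as small, so its blocks are 256 / 8 = 32 MiB;
an 8 GiB device-local heap is large, so blocks default to 256 MiB.
*/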
7225 
7226 VkResult VmaAllocator_T::AllocateMemoryOfType(
7227  const VkMemoryRequirements& vkMemReq,
7228  bool dedicatedAllocation,
7229  VkBuffer dedicatedBuffer,
7230  VkImage dedicatedImage,
7231  const VmaAllocationCreateInfo& createInfo,
7232  uint32_t memTypeIndex,
7233  VmaSuballocationType suballocType,
7234  VmaAllocation* pAllocation)
7235 {
7236  VMA_ASSERT(pAllocation != VMA_NULL);
7237  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7238 
7239  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7240 
7241  // If memory type is not HOST_VISIBLE, disable MAPPED.
7242  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7243  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7244  {
7245  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7246  }
7247 
7248  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7249  VMA_ASSERT(blockVector);
7250 
7251  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7252  bool preferDedicatedMemory =
7253  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7254  dedicatedAllocation ||
7255  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
7256  vkMemReq.size > preferredBlockSize / 2;
7257 
7258  if(preferDedicatedMemory &&
7259  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7260  finalCreateInfo.pool == VK_NULL_HANDLE)
7261  {
7262  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7263  }
7264 
7265  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7266  {
7267  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7268  {
7269  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7270  }
7271  else
7272  {
7273  return AllocateDedicatedMemory(
7274  vkMemReq.size,
7275  suballocType,
7276  memTypeIndex,
7277  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7278  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7279  finalCreateInfo.pUserData,
7280  dedicatedBuffer,
7281  dedicatedImage,
7282  pAllocation);
7283  }
7284  }
7285  else
7286  {
7287  VkResult res = blockVector->Allocate(
7288  VK_NULL_HANDLE, // hCurrentPool
7289  m_CurrentFrameIndex.load(),
7290  vkMemReq,
7291  finalCreateInfo,
7292  suballocType,
7293  pAllocation);
7294  if(res == VK_SUCCESS)
7295  {
7296  return res;
7297  }
7298 
7299  // 5. Try dedicated memory.
7300  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7301  {
7302  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7303  }
7304  else
7305  {
7306  res = AllocateDedicatedMemory(
7307  vkMemReq.size,
7308  suballocType,
7309  memTypeIndex,
7310  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7311  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7312  finalCreateInfo.pUserData,
7313  dedicatedBuffer,
7314  dedicatedImage,
7315  pAllocation);
7316  if(res == VK_SUCCESS)
7317  {
7318  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
7319  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7320  return VK_SUCCESS;
7321  }
7322  else
7323  {
7324  // Everything failed: Return error code.
7325  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7326  return res;
7327  }
7328  }
7329  }
7330 }
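/*
A minimal sketch of steering this logic from user code: requesting a
dedicated block up front instead of relying on the size heuristic
(bufCreateInfo and allocator are assumed to exist):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);
*/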
7331 
7332 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7333  VkDeviceSize size,
7334  VmaSuballocationType suballocType,
7335  uint32_t memTypeIndex,
7336  bool map,
7337  bool isUserDataString,
7338  void* pUserData,
7339  VkBuffer dedicatedBuffer,
7340  VkImage dedicatedImage,
7341  VmaAllocation* pAllocation)
7342 {
7343  VMA_ASSERT(pAllocation);
7344 
7345  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7346  allocInfo.memoryTypeIndex = memTypeIndex;
7347  allocInfo.allocationSize = size;
7348 
7349  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7350  if(m_UseKhrDedicatedAllocation)
7351  {
7352  if(dedicatedBuffer != VK_NULL_HANDLE)
7353  {
7354  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7355  dedicatedAllocInfo.buffer = dedicatedBuffer;
7356  allocInfo.pNext = &dedicatedAllocInfo;
7357  }
7358  else if(dedicatedImage != VK_NULL_HANDLE)
7359  {
7360  dedicatedAllocInfo.image = dedicatedImage;
7361  allocInfo.pNext = &dedicatedAllocInfo;
7362  }
7363  }
7364 
7365  // Allocate VkDeviceMemory.
7366  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7367  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7368  if(res < 0)
7369  {
7370  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7371  return res;
7372  }
7373 
7374  void* pMappedData = VMA_NULL;
7375  if(map)
7376  {
7377  res = (*m_VulkanFunctions.vkMapMemory)(
7378  m_hDevice,
7379  hMemory,
7380  0,
7381  VK_WHOLE_SIZE,
7382  0,
7383  &pMappedData);
7384  if(res < 0)
7385  {
7386  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7387  FreeVulkanMemory(memTypeIndex, size, hMemory);
7388  return res;
7389  }
7390  }
7391 
7392  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7393  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7394  (*pAllocation)->SetUserData(this, pUserData);
7395 
7396  // Register it in m_pDedicatedAllocations.
7397  {
7398  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7399  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7400  VMA_ASSERT(pDedicatedAllocations);
7401  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7402  }
7403 
7404  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7405 
7406  return VK_SUCCESS;
7407 }
7408 
7409 void VmaAllocator_T::GetBufferMemoryRequirements(
7410  VkBuffer hBuffer,
7411  VkMemoryRequirements& memReq,
7412  bool& requiresDedicatedAllocation,
7413  bool& prefersDedicatedAllocation) const
7414 {
7415  if(m_UseKhrDedicatedAllocation)
7416  {
7417  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7418  memReqInfo.buffer = hBuffer;
7419 
7420  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7421 
7422  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7423  memReq2.pNext = &memDedicatedReq;
7424 
7425  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7426 
7427  memReq = memReq2.memoryRequirements;
7428  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7429  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7430  }
7431  else
7432  {
7433  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7434  requiresDedicatedAllocation = false;
7435  prefersDedicatedAllocation = false;
7436  }
7437 }
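/*
The vkGetBufferMemoryRequirements2KHR path above is only taken when the
allocator was created with VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT,
which in turn requires VK_KHR_get_memory_requirements2 and
VK_KHR_dedicated_allocation to be enabled on the device. A sketch
(device creation omitted):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
*/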
7438 
7439 void VmaAllocator_T::GetImageMemoryRequirements(
7440  VkImage hImage,
7441  VkMemoryRequirements& memReq,
7442  bool& requiresDedicatedAllocation,
7443  bool& prefersDedicatedAllocation) const
7444 {
7445  if(m_UseKhrDedicatedAllocation)
7446  {
7447  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7448  memReqInfo.image = hImage;
7449 
7450  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7451 
7452  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7453  memReq2.pNext = &memDedicatedReq;
7454 
7455  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7456 
7457  memReq = memReq2.memoryRequirements;
7458  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7459  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7460  }
7461  else
7462  {
7463  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7464  requiresDedicatedAllocation = false;
7465  prefersDedicatedAllocation = false;
7466  }
7467 }
7468 
7469 VkResult VmaAllocator_T::AllocateMemory(
7470  const VkMemoryRequirements& vkMemReq,
7471  bool requiresDedicatedAllocation,
7472  bool prefersDedicatedAllocation,
7473  VkBuffer dedicatedBuffer,
7474  VkImage dedicatedImage,
7475  const VmaAllocationCreateInfo& createInfo,
7476  VmaSuballocationType suballocType,
7477  VmaAllocation* pAllocation)
7478 {
7479  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7480  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7481  {
7482  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7483  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7484  }
7485  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7487  {
7488  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7489  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7490  }
7491  if(requiresDedicatedAllocation)
7492  {
7493  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7494  {
7495  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7496  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7497  }
7498  if(createInfo.pool != VK_NULL_HANDLE)
7499  {
7500  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7501  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7502  }
7503  }
7504  if((createInfo.pool != VK_NULL_HANDLE) &&
7505  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7506  {
7507  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7508  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7509  }
7510 
7511  if(createInfo.pool != VK_NULL_HANDLE)
7512  {
7513  return createInfo.pool->m_BlockVector.Allocate(
7514  createInfo.pool,
7515  m_CurrentFrameIndex.load(),
7516  vkMemReq,
7517  createInfo,
7518  suballocType,
7519  pAllocation);
7520  }
7521  else
7522  {
7523  // Bit mask of memory Vulkan types acceptable for this allocation.
7524  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7525  uint32_t memTypeIndex = UINT32_MAX;
7526  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7527  if(res == VK_SUCCESS)
7528  {
7529  res = AllocateMemoryOfType(
7530  vkMemReq,
7531  requiresDedicatedAllocation || prefersDedicatedAllocation,
7532  dedicatedBuffer,
7533  dedicatedImage,
7534  createInfo,
7535  memTypeIndex,
7536  suballocType,
7537  pAllocation);
7538  // Succeeded on first try.
7539  if(res == VK_SUCCESS)
7540  {
7541  return res;
7542  }
7543  // Allocation from this memory type failed. Try other compatible memory types.
7544  else
7545  {
7546  for(;;)
7547  {
7548  // Remove old memTypeIndex from list of possibilities.
7549  memoryTypeBits &= ~(1u << memTypeIndex);
7550  // Find alternative memTypeIndex.
7551  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7552  if(res == VK_SUCCESS)
7553  {
7554  res = AllocateMemoryOfType(
7555  vkMemReq,
7556  requiresDedicatedAllocation || prefersDedicatedAllocation,
7557  dedicatedBuffer,
7558  dedicatedImage,
7559  createInfo,
7560  memTypeIndex,
7561  suballocType,
7562  pAllocation);
7563  // Allocation from this alternative memory type succeeded.
7564  if(res == VK_SUCCESS)
7565  {
7566  return res;
7567  }
7568  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7569  }
7570  // No other matching memory type index could be found.
7571  else
7572  {
7573  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7574  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7575  }
7576  }
7577  }
7578  }
7579  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7580  else
7581  return res;
7582  }
7583 }
7584 
7585 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7586 {
7587  VMA_ASSERT(allocation);
7588 
7589  if(allocation->CanBecomeLost() == false ||
7590  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7591  {
7592  switch(allocation->GetType())
7593  {
7594  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7595  {
7596  VmaBlockVector* pBlockVector = VMA_NULL;
7597  VmaPool hPool = allocation->GetPool();
7598  if(hPool != VK_NULL_HANDLE)
7599  {
7600  pBlockVector = &hPool->m_BlockVector;
7601  }
7602  else
7603  {
7604  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7605  pBlockVector = m_pBlockVectors[memTypeIndex];
7606  }
7607  pBlockVector->Free(allocation);
7608  }
7609  break;
7610  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7611  FreeDedicatedMemory(allocation);
7612  break;
7613  default:
7614  VMA_ASSERT(0);
7615  }
7616  }
7617 
7618  allocation->SetUserData(this, VMA_NULL);
7619  vma_delete(this, allocation);
7620 }
7621 
7622 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7623 {
7624  // Initialize.
7625  InitStatInfo(pStats->total);
7626  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7627  InitStatInfo(pStats->memoryType[i]);
7628  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7629  InitStatInfo(pStats->memoryHeap[i]);
7630 
7631  // Process default pools.
7632  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7633  {
7634  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7635  VMA_ASSERT(pBlockVector);
7636  pBlockVector->AddStats(pStats);
7637  }
7638 
7639  // Process custom pools.
7640  {
7641  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7642  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7643  {
7644  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7645  }
7646  }
7647 
7648  // Process dedicated allocations.
7649  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7650  {
7651  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7652  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7653  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7654  VMA_ASSERT(pDedicatedAllocVector);
7655  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7656  {
7657  VmaStatInfo allocationStatInfo;
7658  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7659  VmaAddStatInfo(pStats->total, allocationStatInfo);
7660  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7661  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7662  }
7663  }
7664 
7665  // Postprocess.
7666  VmaPostprocessCalcStatInfo(pStats->total);
7667  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7668  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7669  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7670  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7671 }
7672 
7673 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002
7674 
7675 VkResult VmaAllocator_T::Defragment(
7676  VmaAllocation* pAllocations,
7677  size_t allocationCount,
7678  VkBool32* pAllocationsChanged,
7679  const VmaDefragmentationInfo* pDefragmentationInfo,
7680  VmaDefragmentationStats* pDefragmentationStats)
7681 {
7682  if(pAllocationsChanged != VMA_NULL)
7683  {
7684  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32)); // Clear the whole array, one flag per allocation.
7685  }
7686  if(pDefragmentationStats != VMA_NULL)
7687  {
7688  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7689  }
7690 
7691  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7692 
7693  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7694 
7695  const size_t poolCount = m_Pools.size();
7696 
7697  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7698  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7699  {
7700  VmaAllocation hAlloc = pAllocations[allocIndex];
7701  VMA_ASSERT(hAlloc);
7702  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7703  // DedicatedAlloc cannot be defragmented.
7704  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7705  // Only HOST_VISIBLE memory types can be defragmented.
7706  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7707  // Lost allocation cannot be defragmented.
7708  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7709  {
7710  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7711 
7712  const VmaPool hAllocPool = hAlloc->GetPool();
7713  // This allocation belongs to custom pool.
7714  if(hAllocPool != VK_NULL_HANDLE)
7715  {
7716  pAllocBlockVector = &hAllocPool->GetBlockVector();
7717  }
7718  // This allocation belongs to general pool.
7719  else
7720  {
7721  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7722  }
7723 
7724  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7725 
7726  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7727  &pAllocationsChanged[allocIndex] : VMA_NULL;
7728  pDefragmentator->AddAllocation(hAlloc, pChanged);
7729  }
7730  }
7731 
7732  VkResult result = VK_SUCCESS;
7733 
7734  // ======== Main processing.
7735 
7736  VkDeviceSize maxBytesToMove = UINT64_MAX; // VkDeviceSize is 64-bit; SIZE_MAX would truncate the "no limit" default on 32-bit targets.
7737  uint32_t maxAllocationsToMove = UINT32_MAX;
7738  if(pDefragmentationInfo != VMA_NULL)
7739  {
7740  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7741  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7742  }
7743 
7744  // Process standard memory.
7745  for(uint32_t memTypeIndex = 0;
7746  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7747  ++memTypeIndex)
7748  {
7749  // Only HOST_VISIBLE memory types can be defragmented.
7750  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7751  {
7752  result = m_pBlockVectors[memTypeIndex]->Defragment(
7753  pDefragmentationStats,
7754  maxBytesToMove,
7755  maxAllocationsToMove);
7756  }
7757  }
7758 
7759  // Process custom pools.
7760  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7761  {
7762  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7763  pDefragmentationStats,
7764  maxBytesToMove,
7765  maxAllocationsToMove);
7766  }
7767 
7768  // ======== Destroy defragmentators.
7769 
7770  // Process custom pools.
7771  for(size_t poolIndex = poolCount; poolIndex--; )
7772  {
7773  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7774  }
7775 
7776  // Process standard memory.
7777  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7778  {
7779  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7780  {
7781  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7782  }
7783  }
7784 
7785  return result;
7786 }
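/*
Usage sketch for the public wrapper vmaDefragment() (the allocations array
and its count are assumed to be gathered by the application; as filtered
above, only host-visible, non-lost block allocations are moved):

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // Effectively no byte limit.
    defragInfo.maxAllocationsToMove = UINT32_MAX; // No move-count limit.

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocations, allocationCount,
        allocationsChanged, &defragInfo, &stats);
*/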
7787 
7788 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7789 {
7790  if(hAllocation->CanBecomeLost())
7791  {
7792  /*
7793  Warning: This is a carefully designed algorithm.
7794  Do not modify unless you really know what you're doing :)
7795  */
7796  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7797  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7798  for(;;)
7799  {
7800  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7801  {
7802  pAllocationInfo->memoryType = UINT32_MAX;
7803  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7804  pAllocationInfo->offset = 0;
7805  pAllocationInfo->size = hAllocation->GetSize();
7806  pAllocationInfo->pMappedData = VMA_NULL;
7807  pAllocationInfo->pUserData = hAllocation->GetUserData();
7808  return;
7809  }
7810  else if(localLastUseFrameIndex == localCurrFrameIndex)
7811  {
7812  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7813  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7814  pAllocationInfo->offset = hAllocation->GetOffset();
7815  pAllocationInfo->size = hAllocation->GetSize();
7816  pAllocationInfo->pMappedData = VMA_NULL;
7817  pAllocationInfo->pUserData = hAllocation->GetUserData();
7818  return;
7819  }
7820  else // Last use time earlier than current time.
7821  {
7822  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7823  {
7824  localLastUseFrameIndex = localCurrFrameIndex;
7825  }
7826  }
7827  }
7828  }
7829  else
7830  {
7831  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7832  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7833  pAllocationInfo->offset = hAllocation->GetOffset();
7834  pAllocationInfo->size = hAllocation->GetSize();
7835  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7836  pAllocationInfo->pUserData = hAllocation->GetUserData();
7837  }
7838 }
7839 
7840 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7841 {
7842  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7843  if(hAllocation->CanBecomeLost())
7844  {
7845  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7846  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7847  for(;;)
7848  {
7849  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7850  {
7851  return false;
7852  }
7853  else if(localLastUseFrameIndex == localCurrFrameIndex)
7854  {
7855  return true;
7856  }
7857  else // Last use time earlier than current time.
7858  {
7859  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7860  {
7861  localLastUseFrameIndex = localCurrFrameIndex;
7862  }
7863  }
7864  }
7865  }
7866  else
7867  {
7868  return true;
7869  }
7870 }
7871 
7872 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7873 {
7874  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7875 
7876  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7877 
7878  if(newCreateInfo.maxBlockCount == 0)
7879  {
7880  newCreateInfo.maxBlockCount = SIZE_MAX;
7881  }
7882  if(newCreateInfo.blockSize == 0)
7883  {
7884  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7885  }
7886 
7887  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7888 
7889  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7890  if(res != VK_SUCCESS)
7891  {
7892  vma_delete(this, *pPool);
7893  *pPool = VMA_NULL;
7894  return res;
7895  }
7896 
7897  // Add to m_Pools.
7898  {
7899  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7900  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7901  }
7902 
7903  return VK_SUCCESS;
7904 }
7905 
7906 void VmaAllocator_T::DestroyPool(VmaPool pool)
7907 {
7908  // Remove from m_Pools.
7909  {
7910  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7911  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7912  VMA_ASSERT(success && "Pool not found in Allocator.");
7913  }
7914 
7915  vma_delete(this, pool);
7916 }
7917 
7918 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7919 {
7920  pool->m_BlockVector.GetPoolStats(pPoolStats);
7921 }
7922 
7923 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7924 {
7925  m_CurrentFrameIndex.store(frameIndex);
7926 }
7927 
7928 void VmaAllocator_T::MakePoolAllocationsLost(
7929  VmaPool hPool,
7930  size_t* pLostAllocationCount)
7931 {
7932  hPool->m_BlockVector.MakePoolAllocationsLost(
7933  m_CurrentFrameIndex.load(),
7934  pLostAllocationCount);
7935 }
7936 
7937 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7938 {
7939  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7940  (*pAllocation)->InitLost();
7941 }
7942 
7943 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7944 {
7945  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7946 
7947  VkResult res;
7948  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7949  {
7950  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7951  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7952  {
7953  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7954  if(res == VK_SUCCESS)
7955  {
7956  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7957  }
7958  }
7959  else
7960  {
7961  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7962  }
7963  }
7964  else
7965  {
7966  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7967  }
7968 
7969  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7970  {
7971  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7972  }
7973 
7974  return res;
7975 }
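/*
The m_HeapSizeLimit accounting above is driven by
VmaAllocatorCreateInfo::pHeapSizeLimit. Sketch of capping heap 0 at 1 GiB
while leaving the remaining heaps unlimited (VK_WHOLE_SIZE means no limit):

    VkDeviceSize heapSizeLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimits[i] = VK_WHOLE_SIZE;
    heapSizeLimits[0] = 1024ull * 1024 * 1024; // 1 GiB cap on heap 0.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pHeapSizeLimit = heapSizeLimits;
*/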
7976 
7977 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7978 {
7979  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7980  {
7981  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7982  }
7983 
7984  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7985 
7986  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7987  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7988  {
7989  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7990  m_HeapSizeLimit[heapIndex] += size;
7991  }
7992 }
7993 
7994 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7995 {
7996  if(hAllocation->CanBecomeLost())
7997  {
7998  return VK_ERROR_MEMORY_MAP_FAILED;
7999  }
8000 
8001  switch(hAllocation->GetType())
8002  {
8003  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8004  {
8005  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8006  char *pBytes = VMA_NULL;
8007  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8008  if(res == VK_SUCCESS)
8009  {
8010  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8011  hAllocation->BlockAllocMap();
8012  }
8013  return res;
8014  }
8015  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8016  return hAllocation->DedicatedAllocMap(this, ppData);
8017  default:
8018  VMA_ASSERT(0);
8019  return VK_ERROR_MEMORY_MAP_FAILED;
8020  }
8021 }
8022 
8023 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8024 {
8025  switch(hAllocation->GetType())
8026  {
8027  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8028  {
8029  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8030  hAllocation->BlockAllocUnmap();
8031  pBlock->Unmap(this, 1);
8032  }
8033  break;
8034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8035  hAllocation->DedicatedAllocUnmap(this);
8036  break;
8037  default:
8038  VMA_ASSERT(0);
8039  }
8040 }
8041 
8042 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8043 {
8044  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8045 
8046  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8047  {
8048  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8049  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8050  VMA_ASSERT(pDedicatedAllocations);
8051  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8052  VMA_ASSERT(success);
8053  }
8054 
8055  VkDeviceMemory hMemory = allocation->GetMemory();
8056 
8057  if(allocation->GetMappedData() != VMA_NULL)
8058  {
8059  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8060  }
8061 
8062  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8063 
8064  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8065 }
8066 
8067 #if VMA_STATS_STRING_ENABLED
8068 
8069 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8070 {
8071  bool dedicatedAllocationsStarted = false;
8072  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8073  {
8074  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8075  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8076  VMA_ASSERT(pDedicatedAllocVector);
8077  if(pDedicatedAllocVector->empty() == false)
8078  {
8079  if(dedicatedAllocationsStarted == false)
8080  {
8081  dedicatedAllocationsStarted = true;
8082  json.WriteString("DedicatedAllocations");
8083  json.BeginObject();
8084  }
8085 
8086  json.BeginString("Type ");
8087  json.ContinueString(memTypeIndex);
8088  json.EndString();
8089 
8090  json.BeginArray();
8091 
8092  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8093  {
8094  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8095  json.BeginObject(true);
8096 
8097  json.WriteString("Type");
8098  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8099 
8100  json.WriteString("Size");
8101  json.WriteNumber(hAlloc->GetSize());
8102 
8103  const void* pUserData = hAlloc->GetUserData();
8104  if(pUserData != VMA_NULL)
8105  {
8106  json.WriteString("UserData");
8107  if(hAlloc->IsUserDataString())
8108  {
8109  json.WriteString((const char*)pUserData);
8110  }
8111  else
8112  {
8113  json.BeginString();
8114  json.ContinueString_Pointer(pUserData);
8115  json.EndString();
8116  }
8117  }
8118 
8119  json.EndObject();
8120  }
8121 
8122  json.EndArray();
8123  }
8124  }
8125  if(dedicatedAllocationsStarted)
8126  {
8127  json.EndObject();
8128  }
8129 
8130  {
8131  bool allocationsStarted = false;
8132  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8133  {
8134  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8135  {
8136  if(allocationsStarted == false)
8137  {
8138  allocationsStarted = true;
8139  json.WriteString("DefaultPools");
8140  json.BeginObject();
8141  }
8142 
8143  json.BeginString("Type ");
8144  json.ContinueString(memTypeIndex);
8145  json.EndString();
8146 
8147  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8148  }
8149  }
8150  if(allocationsStarted)
8151  {
8152  json.EndObject();
8153  }
8154  }
8155 
8156  {
8157  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8158  const size_t poolCount = m_Pools.size();
8159  if(poolCount > 0)
8160  {
8161  json.WriteString("Pools");
8162  json.BeginArray();
8163  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8164  {
8165  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8166  }
8167  json.EndArray();
8168  }
8169  }
8170 }
8171 
8172 #endif // #if VMA_STATS_STRING_ENABLED
8173 
8174 static VkResult AllocateMemoryForImage(
8175  VmaAllocator allocator,
8176  VkImage image,
8177  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8178  VmaSuballocationType suballocType,
8179  VmaAllocation* pAllocation)
8180 {
8181  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8182 
8183  VkMemoryRequirements vkMemReq = {};
8184  bool requiresDedicatedAllocation = false;
8185  bool prefersDedicatedAllocation = false;
8186  allocator->GetImageMemoryRequirements(image, vkMemReq,
8187  requiresDedicatedAllocation, prefersDedicatedAllocation);
8188 
8189  return allocator->AllocateMemory(
8190  vkMemReq,
8191  requiresDedicatedAllocation,
8192  prefersDedicatedAllocation,
8193  VK_NULL_HANDLE, // dedicatedBuffer
8194  image, // dedicatedImage
8195  *pAllocationCreateInfo,
8196  suballocType,
8197  pAllocation);
8198 }
8199 
8201 // Public interface
8202 
8203 VkResult vmaCreateAllocator(
8204  const VmaAllocatorCreateInfo* pCreateInfo,
8205  VmaAllocator* pAllocator)
8206 {
8207  VMA_ASSERT(pCreateInfo && pAllocator);
8208  VMA_DEBUG_LOG("vmaCreateAllocator");
8209  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8210  return VK_SUCCESS;
8211 }
8212 
8213 void vmaDestroyAllocator(
8214  VmaAllocator allocator)
8215 {
8216  if(allocator != VK_NULL_HANDLE)
8217  {
8218  VMA_DEBUG_LOG("vmaDestroyAllocator");
8219  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8220  vma_delete(&allocationCallbacks, allocator);
8221  }
8222 }
8223 
8224 void vmaGetPhysicalDeviceProperties(
8225  VmaAllocator allocator,
8226  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8227 {
8228  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8229  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8230 }
8231 
8232 void vmaGetMemoryProperties(
8233  VmaAllocator allocator,
8234  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8235 {
8236  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8237  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8238 }
8239 
8240 void vmaGetMemoryTypeProperties(
8241  VmaAllocator allocator,
8242  uint32_t memoryTypeIndex,
8243  VkMemoryPropertyFlags* pFlags)
8244 {
8245  VMA_ASSERT(allocator && pFlags);
8246  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8247  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8248 }
8249 
8250 void vmaSetCurrentFrameIndex(
8251  VmaAllocator allocator,
8252  uint32_t frameIndex)
8253 {
8254  VMA_ASSERT(allocator);
8255  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8256 
8257  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8258 
8259  allocator->SetCurrentFrameIndex(frameIndex);
8260 }
8261 
8262 void vmaCalculateStats(
8263  VmaAllocator allocator,
8264  VmaStats* pStats)
8265 {
8266  VMA_ASSERT(allocator && pStats);
8267  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8268  allocator->CalculateStats(pStats);
8269 }
8270 
8271 #if VMA_STATS_STRING_ENABLED
8272 
8273 void vmaBuildStatsString(
8274  VmaAllocator allocator,
8275  char** ppStatsString,
8276  VkBool32 detailedMap)
8277 {
8278  VMA_ASSERT(allocator && ppStatsString);
8279  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8280 
8281  VmaStringBuilder sb(allocator);
8282  {
8283  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8284  json.BeginObject();
8285 
8286  VmaStats stats;
8287  allocator->CalculateStats(&stats);
8288 
8289  json.WriteString("Total");
8290  VmaPrintStatInfo(json, stats.total);
8291 
8292  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8293  {
8294  json.BeginString("Heap ");
8295  json.ContinueString(heapIndex);
8296  json.EndString();
8297  json.BeginObject();
8298 
8299  json.WriteString("Size");
8300  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8301 
8302  json.WriteString("Flags");
8303  json.BeginArray(true);
8304  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8305  {
8306  json.WriteString("DEVICE_LOCAL");
8307  }
8308  json.EndArray();
8309 
8310  if(stats.memoryHeap[heapIndex].blockCount > 0)
8311  {
8312  json.WriteString("Stats");
8313  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8314  }
8315 
8316  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8317  {
8318  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8319  {
8320  json.BeginString("Type ");
8321  json.ContinueString(typeIndex);
8322  json.EndString();
8323 
8324  json.BeginObject();
8325 
8326  json.WriteString("Flags");
8327  json.BeginArray(true);
8328  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8329  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8330  {
8331  json.WriteString("DEVICE_LOCAL");
8332  }
8333  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8334  {
8335  json.WriteString("HOST_VISIBLE");
8336  }
8337  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8338  {
8339  json.WriteString("HOST_COHERENT");
8340  }
8341  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8342  {
8343  json.WriteString("HOST_CACHED");
8344  }
8345  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8346  {
8347  json.WriteString("LAZILY_ALLOCATED");
8348  }
8349  json.EndArray();
8350 
8351  if(stats.memoryType[typeIndex].blockCount > 0)
8352  {
8353  json.WriteString("Stats");
8354  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8355  }
8356 
8357  json.EndObject();
8358  }
8359  }
8360 
8361  json.EndObject();
8362  }
8363  if(detailedMap == VK_TRUE)
8364  {
8365  allocator->PrintDetailedMap(json);
8366  }
8367 
8368  json.EndObject();
8369  }
8370 
8371  const size_t len = sb.GetLength();
8372  char* const pChars = vma_new_array(allocator, char, len + 1);
8373  if(len > 0)
8374  {
8375  memcpy(pChars, sb.GetData(), len);
8376  }
8377  pChars[len] = '\0';
8378  *ppStatsString = pChars;
8379 }
8380 
8381 void vmaFreeStatsString(
8382  VmaAllocator allocator,
8383  char* pStatsString)
8384 {
8385  if(pStatsString != VMA_NULL)
8386  {
8387  VMA_ASSERT(allocator);
8388  size_t len = strlen(pStatsString);
8389  vma_delete_array(allocator, pStatsString, len + 1);
8390  }
8391 }
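/*
Usage sketch for dumping the allocator state built above as a JSON string
(detailedMap = VK_TRUE additionally serializes every block and allocation):

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/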
8392 
8393 #endif // #if VMA_STATS_STRING_ENABLED
8394 
8395 /*
8396 This function is not protected by any mutex because it just reads immutable data.
8397 */
8398 VkResult vmaFindMemoryTypeIndex(
8399  VmaAllocator allocator,
8400  uint32_t memoryTypeBits,
8401  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8402  uint32_t* pMemoryTypeIndex)
8403 {
8404  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8405  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8406  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8407 
8408  if(pAllocationCreateInfo->memoryTypeBits != 0)
8409  {
8410  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8411  }
8412 
8413  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8414  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8415 
8416  // Convert usage to requiredFlags and preferredFlags.
8417  switch(pAllocationCreateInfo->usage)
8418  {
8419  case VMA_MEMORY_USAGE_UNKNOWN:
8420  break;
8421  case VMA_MEMORY_USAGE_GPU_ONLY:
8422  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8423  break;
8424  case VMA_MEMORY_USAGE_CPU_ONLY:
8425  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8426  break;
8427  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8428  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8429  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8430  break;
8431  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8432  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8433  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8434  break;
8435  default:
8436  break;
8437  }
8438 
8439  *pMemoryTypeIndex = UINT32_MAX;
8440  uint32_t minCost = UINT32_MAX;
8441  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8442  memTypeIndex < allocator->GetMemoryTypeCount();
8443  ++memTypeIndex, memTypeBit <<= 1)
8444  {
8445  // This memory type is acceptable according to memoryTypeBits bitmask.
8446  if((memTypeBit & memoryTypeBits) != 0)
8447  {
8448  const VkMemoryPropertyFlags currFlags =
8449  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8450  // This memory type contains requiredFlags.
8451  if((requiredFlags & ~currFlags) == 0)
8452  {
8453  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8454  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8455  // Remember memory type with lowest cost.
8456  if(currCost < minCost)
8457  {
8458  *pMemoryTypeIndex = memTypeIndex;
8459  if(currCost == 0)
8460  {
8461  return VK_SUCCESS;
8462  }
8463  minCost = currCost;
8464  }
8465  }
8466  }
8467  }
8468  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8469 }
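/*
Usage sketch: picking a memory type for a staging buffer. In real code
memoryTypeBits would come from vkGetBufferMemoryRequirements (or the
ForBufferInfo/ForImageInfo helpers below); UINT32_MAX is used here only to
accept every type:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX,
        &allocCreateInfo, &memTypeIndex);
*/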
8470 
8471 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8472  VmaAllocator allocator,
8473  const VkBufferCreateInfo* pBufferCreateInfo,
8474  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8475  uint32_t* pMemoryTypeIndex)
8476 {
8477  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8478  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8479  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8480  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8481 
8482  const VkDevice hDev = allocator->m_hDevice;
8483  VkBuffer hBuffer = VK_NULL_HANDLE;
8484  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8485  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8486  if(res == VK_SUCCESS)
8487  {
8488  VkMemoryRequirements memReq = {};
8489  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8490  hDev, hBuffer, &memReq);
8491 
8492  res = vmaFindMemoryTypeIndex(
8493  allocator,
8494  memReq.memoryTypeBits,
8495  pAllocationCreateInfo,
8496  pMemoryTypeIndex);
8497 
8498  allocator->GetVulkanFunctions().vkDestroyBuffer(
8499  hDev, hBuffer, allocator->GetAllocationCallbacks());
8500  }
8501  return res;
8502 }
8503 
8504 VkResult vmaFindMemoryTypeIndexForImageInfo(
8505  VmaAllocator allocator,
8506  const VkImageCreateInfo* pImageCreateInfo,
8507  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8508  uint32_t* pMemoryTypeIndex)
8509 {
8510  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8511  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8512  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8513  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8514 
8515  const VkDevice hDev = allocator->m_hDevice;
8516  VkImage hImage = VK_NULL_HANDLE;
8517  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8518  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8519  if(res == VK_SUCCESS)
8520  {
8521  VkMemoryRequirements memReq = {};
8522  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8523  hDev, hImage, &memReq);
8524 
8525  res = vmaFindMemoryTypeIndex(
8526  allocator,
8527  memReq.memoryTypeBits,
8528  pAllocationCreateInfo,
8529  pMemoryTypeIndex);
8530 
8531  allocator->GetVulkanFunctions().vkDestroyImage(
8532  hDev, hImage, allocator->GetAllocationCallbacks());
8533  }
8534  return res;
8535 }
8536 
8537 VkResult vmaCreatePool(
8538  VmaAllocator allocator,
8539  const VmaPoolCreateInfo* pCreateInfo,
8540  VmaPool* pPool)
8541 {
8542  VMA_ASSERT(allocator && pCreateInfo && pPool);
8543 
8544  VMA_DEBUG_LOG("vmaCreatePool");
8545 
8546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8547 
8548  return allocator->CreatePool(pCreateInfo, pPool);
8549 }
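/*
Usage sketch for creating a custom pool, with memTypeIndex typically coming
from vmaFindMemoryTypeIndex as above (block size and count are illustrative):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.
    poolCreateInfo.maxBlockCount = 4;               // Cap the pool at 256 MiB.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/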
8550 
8551 void vmaDestroyPool(
8552  VmaAllocator allocator,
8553  VmaPool pool)
8554 {
8555  VMA_ASSERT(allocator);
8556 
8557  if(pool == VK_NULL_HANDLE)
8558  {
8559  return;
8560  }
8561 
8562  VMA_DEBUG_LOG("vmaDestroyPool");
8563 
8564  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8565 
8566  allocator->DestroyPool(pool);
8567 }
8568 
8569 void vmaGetPoolStats(
8570  VmaAllocator allocator,
8571  VmaPool pool,
8572  VmaPoolStats* pPoolStats)
8573 {
8574  VMA_ASSERT(allocator && pool && pPoolStats);
8575 
8576  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8577 
8578  allocator->GetPoolStats(pool, pPoolStats);
8579 }
8580 
8581 void vmaMakePoolAllocationsLost(
8582  VmaAllocator allocator,
8583  VmaPool pool,
8584  size_t* pLostAllocationCount)
8585 {
8586  VMA_ASSERT(allocator && pool);
8587 
8588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8589 
8590  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8591 }
8592 
8593 VkResult vmaAllocateMemory(
8594  VmaAllocator allocator,
8595  const VkMemoryRequirements* pVkMemoryRequirements,
8596  const VmaAllocationCreateInfo* pCreateInfo,
8597  VmaAllocation* pAllocation,
8598  VmaAllocationInfo* pAllocationInfo)
8599 {
8600  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8601 
8602  VMA_DEBUG_LOG("vmaAllocateMemory");
8603 
8604  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8605 
8606  VkResult result = allocator->AllocateMemory(
8607  *pVkMemoryRequirements,
8608  false, // requiresDedicatedAllocation
8609  false, // prefersDedicatedAllocation
8610  VK_NULL_HANDLE, // dedicatedBuffer
8611  VK_NULL_HANDLE, // dedicatedImage
8612  *pCreateInfo,
8613  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8614  pAllocation);
8615 
8616  if(pAllocationInfo && result == VK_SUCCESS)
8617  {
8618  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8619  }
8620 
8621  return result;
8622 }
8623 
8624 VkResult vmaAllocateMemoryForBuffer(
8625  VmaAllocator allocator,
8626  VkBuffer buffer,
8627  const VmaAllocationCreateInfo* pCreateInfo,
8628  VmaAllocation* pAllocation,
8629  VmaAllocationInfo* pAllocationInfo)
8630 {
8631  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8632 
8633  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8634 
8635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8636 
8637  VkMemoryRequirements vkMemReq = {};
8638  bool requiresDedicatedAllocation = false;
8639  bool prefersDedicatedAllocation = false;
8640  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8641  requiresDedicatedAllocation,
8642  prefersDedicatedAllocation);
8643 
8644  VkResult result = allocator->AllocateMemory(
8645  vkMemReq,
8646  requiresDedicatedAllocation,
8647  prefersDedicatedAllocation,
8648  buffer, // dedicatedBuffer
8649  VK_NULL_HANDLE, // dedicatedImage
8650  *pCreateInfo,
8651  VMA_SUBALLOCATION_TYPE_BUFFER,
8652  pAllocation);
8653 
8654  if(pAllocationInfo && result == VK_SUCCESS)
8655  {
8656  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8657  }
8658 
8659  return result;
8660 }
8661 
8662 VkResult vmaAllocateMemoryForImage(
8663  VmaAllocator allocator,
8664  VkImage image,
8665  const VmaAllocationCreateInfo* pCreateInfo,
8666  VmaAllocation* pAllocation,
8667  VmaAllocationInfo* pAllocationInfo)
8668 {
8669  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8670 
8671  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8672 
8673  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8674 
8675  VkResult result = AllocateMemoryForImage(
8676  allocator,
8677  image,
8678  pCreateInfo,
8679  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8680  pAllocation);
8681 
8682  if(pAllocationInfo && result == VK_SUCCESS)
8683  {
8684  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8685  }
8686 
8687  return result;
8688 }
8689 
8690 void vmaFreeMemory(
8691  VmaAllocator allocator,
8692  VmaAllocation allocation)
8693 {
8694  VMA_ASSERT(allocator && allocation);
8695 
8696  VMA_DEBUG_LOG("vmaFreeMemory");
8697 
8698  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8699 
8700  allocator->FreeMemory(allocation);
8701 }
8702 
8703 void vmaGetAllocationInfo(
8704  VmaAllocator allocator,
8705  VmaAllocation allocation,
8706  VmaAllocationInfo* pAllocationInfo)
8707 {
8708  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8709 
8710  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8711 
8712  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8713 }
8714 
8715 VkBool32 vmaTouchAllocation(
8716  VmaAllocator allocator,
8717  VmaAllocation allocation)
8718 {
8719  VMA_ASSERT(allocator && allocation);
8720 
8721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8722 
8723  return allocator->TouchAllocation(allocation);
8724 }
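/*
Typical per-frame pattern for allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (a sketch; the recreation step is
application-specific):

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation became lost: destroy the old resource and recreate it.
    }
*/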
8725 
8726 void vmaSetAllocationUserData(
8727  VmaAllocator allocator,
8728  VmaAllocation allocation,
8729  void* pUserData)
8730 {
8731  VMA_ASSERT(allocator && allocation);
8732 
8733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8734 
8735  allocation->SetUserData(allocator, pUserData);
8736 }
8737 
8738 void vmaCreateLostAllocation(
8739  VmaAllocator allocator,
8740  VmaAllocation* pAllocation)
8741 {
8742  VMA_ASSERT(allocator && pAllocation);
8743 
8744  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8745 
8746  allocator->CreateLostAllocation(pAllocation);
8747 }
8748 
8749 VkResult vmaMapMemory(
8750  VmaAllocator allocator,
8751  VmaAllocation allocation,
8752  void** ppData)
8753 {
8754  VMA_ASSERT(allocator && allocation && ppData);
8755 
8756  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8757 
8758  return allocator->Map(allocation, ppData);
8759 }
8760 
8761 void vmaUnmapMemory(
8762  VmaAllocator allocator,
8763  VmaAllocation allocation)
8764 {
8765  VMA_ASSERT(allocator && allocation);
8766 
8767  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8768 
8769  allocator->Unmap(allocation);
8770 }
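/*
Usage sketch for the map/unmap pair above, assuming alloc lives in a
HOST_VISIBLE and HOST_COHERENT memory type (otherwise an explicit flush
would also be needed before the GPU reads the data):

    void* pData = nullptr;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/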
8771 
8772 VkResult vmaDefragment(
8773  VmaAllocator allocator,
8774  VmaAllocation* pAllocations,
8775  size_t allocationCount,
8776  VkBool32* pAllocationsChanged,
8777  const VmaDefragmentationInfo *pDefragmentationInfo,
8778  VmaDefragmentationStats* pDefragmentationStats)
8779 {
8780  VMA_ASSERT(allocator && pAllocations);
8781 
8782  VMA_DEBUG_LOG("vmaDefragment");
8783 
8784  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8785 
8786  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8787 }
8788 
8789 VkResult vmaCreateBuffer(
8790  VmaAllocator allocator,
8791  const VkBufferCreateInfo* pBufferCreateInfo,
8792  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8793  VkBuffer* pBuffer,
8794  VmaAllocation* pAllocation,
8795  VmaAllocationInfo* pAllocationInfo)
8796 {
8797  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8798 
8799  VMA_DEBUG_LOG("vmaCreateBuffer");
8800 
8801  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8802 
8803  *pBuffer = VK_NULL_HANDLE;
8804  *pAllocation = VK_NULL_HANDLE;
8805 
8806  // 1. Create VkBuffer.
8807  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8808  allocator->m_hDevice,
8809  pBufferCreateInfo,
8810  allocator->GetAllocationCallbacks(),
8811  pBuffer);
8812  if(res >= 0)
8813  {
8814  // 2. vkGetBufferMemoryRequirements.
8815  VkMemoryRequirements vkMemReq = {};
8816  bool requiresDedicatedAllocation = false;
8817  bool prefersDedicatedAllocation = false;
8818  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8819  requiresDedicatedAllocation, prefersDedicatedAllocation);
8820 
8821  // Make sure alignment requirements for specific buffer usages reported
8822  // in Physical Device Properties are included in alignment reported by memory requirements.
8823  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8824  {
8825  VMA_ASSERT(vkMemReq.alignment %
8826  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8827  }
8828  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8829  {
8830  VMA_ASSERT(vkMemReq.alignment %
8831  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8832  }
8833  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8834  {
8835  VMA_ASSERT(vkMemReq.alignment %
8836  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8837  }
8838 
8839  // 3. Allocate memory using allocator.
8840  res = allocator->AllocateMemory(
8841  vkMemReq,
8842  requiresDedicatedAllocation,
8843  prefersDedicatedAllocation,
8844  *pBuffer, // dedicatedBuffer
8845  VK_NULL_HANDLE, // dedicatedImage
8846  *pAllocationCreateInfo,
8847  VMA_SUBALLOCATION_TYPE_BUFFER,
8848  pAllocation);
8849  if(res >= 0)
8850  {
8851  // 4. Bind buffer with memory.
8852  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8853  allocator->m_hDevice,
8854  *pBuffer,
8855  (*pAllocation)->GetMemory(),
8856  (*pAllocation)->GetOffset());
8857  if(res >= 0)
8858  {
8859  // All steps succeeded.
8860  if(pAllocationInfo != VMA_NULL)
8861  {
8862  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8863  }
8864  return VK_SUCCESS;
8865  }
8866  allocator->FreeMemory(*pAllocation);
8867  *pAllocation = VK_NULL_HANDLE;
8868  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8869  *pBuffer = VK_NULL_HANDLE;
8870  return res;
8871  }
8872  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8873  *pBuffer = VK_NULL_HANDLE;
8874  return res;
8875  }
8876  return res;
8877 }
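/*
End-to-end usage sketch of the create / allocate / bind sequence implemented
above (a 64 KiB uniform buffer; all values illustrative):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, &allocInfo);
*/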
8878 
8879 void vmaDestroyBuffer(
8880  VmaAllocator allocator,
8881  VkBuffer buffer,
8882  VmaAllocation allocation)
8883 {
8884  if(buffer != VK_NULL_HANDLE)
8885  {
8886  VMA_ASSERT(allocator);
8887 
8888  VMA_DEBUG_LOG("vmaDestroyBuffer");
8889 
8890  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8891 
8892  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8893 
8894  allocator->FreeMemory(allocation);
8895  }
8896 }
8897 
8898 VkResult vmaCreateImage(
8899  VmaAllocator allocator,
8900  const VkImageCreateInfo* pImageCreateInfo,
8901  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8902  VkImage* pImage,
8903  VmaAllocation* pAllocation,
8904  VmaAllocationInfo* pAllocationInfo)
8905 {
8906  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8907 
8908  VMA_DEBUG_LOG("vmaCreateImage");
8909 
8910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8911 
8912  *pImage = VK_NULL_HANDLE;
8913  *pAllocation = VK_NULL_HANDLE;
8914 
8915  // 1. Create VkImage.
8916  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8917  allocator->m_hDevice,
8918  pImageCreateInfo,
8919  allocator->GetAllocationCallbacks(),
8920  pImage);
8921  if(res >= 0)
8922  {
8923  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8924  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8925  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8926 
8927  // 2. Allocate memory using allocator.
8928  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8929  if(res >= 0)
8930  {
8931  // 3. Bind image with memory.
8932  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8933  allocator->m_hDevice,
8934  *pImage,
8935  (*pAllocation)->GetMemory(),
8936  (*pAllocation)->GetOffset());
8937  if(res >= 0)
8938  {
8939  // All steps succeeded.
8940  if(pAllocationInfo != VMA_NULL)
8941  {
8942  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8943  }
8944  return VK_SUCCESS;
8945  }
8946  allocator->FreeMemory(*pAllocation);
8947  *pAllocation = VK_NULL_HANDLE;
8948  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8949  *pImage = VK_NULL_HANDLE;
8950  return res;
8951  }
8952  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8953  *pImage = VK_NULL_HANDLE;
8954  return res;
8955  }
8956  return res;
8957 }
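/*
Analogous usage sketch for images (a small sampled 2D texture; all values
illustrative):

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 256, 256, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation alloc;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &alloc, nullptr);
*/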
8958 
8959 void vmaDestroyImage(
8960  VmaAllocator allocator,
8961  VkImage image,
8962  VmaAllocation allocation)
8963 {
8964  if(image != VK_NULL_HANDLE)
8965  {
8966  VMA_ASSERT(allocator);
8967 
8968  VMA_DEBUG_LOG("vmaDestroyImage");
8969 
8970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8971 
8972  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8973 
8974  allocator->FreeMemory(allocation);
8975  }
8976 }
8977 
8978 #endif // #ifdef VMA_IMPLEMENTATION
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1397
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1433
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:934
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1384
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1132
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1668
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1686
Definition: vk_mem_alloc.h:1171
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1280
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:949
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1100
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:884
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:905
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:910
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1688
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1267
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1443
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:944
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1083
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1392
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:897
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1241
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1096
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:901
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1387
Definition: vk_mem_alloc.h:1180
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1262
Definition: vk_mem_alloc.h:1253
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1086
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:946
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1405
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:980
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1436
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1251
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1286
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1018
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1102
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1221
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1095
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:955
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:899
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:954
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1419
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1527
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:974
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1095
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1092
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1424
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1508
Definition: vk_mem_alloc.h:1249
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1684
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:942
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:957
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1090
Definition: vk_mem_alloc.h:1137
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1377
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1088
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:952
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:956
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1208
Definition: vk_mem_alloc.h:1164
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1522
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:932
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:945
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1489
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1355
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1096
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
TODO finish documentation...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1103
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1430
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1096
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1494