Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/** \brief Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.

Provided for informative purposes, e.g. to gather statistics about the number
of allocations or total amount of memory allocated in Vulkan. Both members are
optional and can be null.
*/
typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    /** The allocator and all objects created from it will not be synchronized
    internally, so you must guarantee they are used from only one thread at a
    time or synchronized externally by you. */
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /// Enables usage of the VK_KHR_dedicated_allocation extension.
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

/** \brief Pointers to some Vulkan functions - a subset used by the library.

Used in VmaAllocatorCreateInfo::pVulkanFunctions.
*/
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;

    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;

    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;

    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional, can be 0 to use default.
    VkDeviceSize preferredLargeHeapBlockSize;

    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;

    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    /// Maximum number of additional frames that are in use at the same time as the current frame. Used only together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
    uint32_t frameInUseCount;

    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps. Optional.
    const VkDeviceSize* pHeapSizeLimit;

    /// Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1. Optional.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

/** PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
You can access them here, without fetching them again on your own. */
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/** PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
You can access them here, without fetching them again on your own. */
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/** \brief Given a memory type index, returns the property flags of this memory type.

This is just a convenience function; the same information can be obtained using
vmaGetMemoryProperties().
*/
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/** \brief Sets index of the current frame.

Used for the lost-allocations logic.
*/
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from the current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as a string in JSON format.
/// @param[out] ppStatsString Must be freed using the vmaFreeStatsString() function.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

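/*
An illustrative sketch (not part of the original header; `allocator` is assumed
valid). The string is allocated by the library and must be returned to it:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
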
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so faster access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mapped and used on host. It usually means CPU system memory.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory will be used for frequent writing on device and readback on host (download).
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own dedicated memory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new ones.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it.
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /// An allocation created with this flag can become lost as a result of another allocation that could not fit in the same memory block.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating an allocation using this flag, other allocations that were created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    /// Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a null-terminated string to be copied.
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Can be left as VMA_MEMORY_USAGE_UNKNOWN if you specify requirements in another way.
    VmaMemoryUsage usage;
    /// Flags that must be set in the memory type chosen for an allocation. Optional, can be 0.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in the memory type chosen for an allocation. Optional, can be 0.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation. Optional, can be 0 (accept any).
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional, can be null.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in the VmaAllocation and can be read back as VmaAllocationInfo::pUserData.
    void* pUserData;
} VmaAllocationCreateInfo;

/** \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.

Returns VK_ERROR_FEATURE_NOT_PRESENT if no suitable memory type was found.
*/
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

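/*
An illustrative sketch (not part of the original header; `allocator` is assumed
valid). Finds a memory type suitable for CPU-write/GPU-read usage:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator,
        UINT32_MAX, // memoryTypeBits - no restriction from any specific resource
        &allocCreateInfo,
        &memTypeIndex);
*/
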
/** \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.

Useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
*/
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/** \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.

Useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
*/
VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    /** Use this flag if you always allocate only buffers and linear images or
    only optimal images out of this pool, so buffer-image granularity can be
    ignored and allocations can be packed tighter. */
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a created VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional, can be 0 to use default.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional, can be 0 for no maximum.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame. Used only together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

/// Allocates Vulkan device memory and creates a VmaPool object.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

/// Destroys a VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/// Retrieves statistics of an existing VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/** \brief Marks all allocations in the given pool as lost if they are not used
in the current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.

@param[out] pLostAllocationCount Number of allocations marked as lost. Optional, can be null.
*/
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

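/*
An illustrative sketch (not part of the original header; `allocator` and
`memTypeIndex` are assumed to come from earlier calls, e.g. vmaFindMemoryTypeIndex):

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolInfo.minBlockCount = 1;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);

    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);
    // ... allocate from the pool via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/
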
VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation object, retrievable using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    /// Handle to the Vulkan memory object. It can change after defragmentation or when the allocation becomes lost.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes. It never changes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null unless the allocation is persistently mapped or mapped with vmaMapMemory().
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

/** \brief General purpose memory allocation.

@param[out] pAllocation Handle to allocated memory.
@param[out] pAllocationInfo Optional. Information about allocated memory. It can be fetched later using vmaGetAllocationInfo().

You should free the memory using vmaFreeMemory().
*/
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/** Memory allocated with this function automatically fulfills the buffer's
VkMemoryRequirements. You should free the memory using vmaFreeMemory(). */
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/** \brief Returns VK_TRUE if the allocation is not lost and atomically marks it as used in the current frame.

A lighter-weight alternative to vmaGetAllocationInfo() when only the lost status is needed.
*/
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

/** \brief Sets pUserData in the given allocation to a new value.

If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
pUserData must be either null or a pointer to a null-terminated string, which is then copied.
*/
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/** \brief Creates a new allocation that is in lost state from the beginning.

Useful if you need a dummy, non-null allocation.
*/
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

/** \brief Maps memory represented by the given allocation and returns a pointer to it.

The mapping is internally reference-counted, so it is safe to call this
function multiple times on the same allocation; each call must be matched by a
call to vmaUnmapMemory().
*/
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

/// Unmaps memory represented by the given allocation, mapped previously using vmaMapMemory().
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

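/*
An illustrative sketch (not part of the original header; `allocator` and
`allocation` are assumed valid and the allocation is assumed to live in
host-visible memory; `srcData`/`srcDataSize` stand for application data):

    void* mappedData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
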
/// Optional configuration parameters to be passed to vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places. Set to VK_WHOLE_SIZE for no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to different places. Set to UINT32_MAX for no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that were copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of VkDeviceMemory bytes that were released to the system by freeing empty blocks.
    VkDeviceSize bytesFreed;
    /// Number of allocations that were moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that were released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

/** \brief Compacts memory by moving allocations.

@param pAllocations Array of allocations that can be moved during this compaction.
@param allocationCount Number of elements in the pAllocations and pAllocationsChanged arrays.
@param[out] pAllocationsChanged Array of booleans indicating whether the matching allocation in pAllocations has been moved. Optional, can be null.
@param pDefragmentationInfo Configuration parameters. Optional, can be null to use defaults.
@param[out] pDefragmentationStats Statistics returned by the function. Optional, can be null.
@return VK_SUCCESS if completed, VK_INCOMPLETE if succeeded but limits specified in pDefragmentationInfo were reached, or a negative error code in case of error.
*/
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

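/*
An illustrative sketch (not part of the original header; `allocations` is
assumed to be an array of `allocCount` allocations whose buffers/images the
application is able to recreate and rebind afterwards):

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no limit
    defragInfo.maxAllocationsToMove = UINT32_MAX; // no limit

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, allocCount,
        VMA_NULL, // pAllocationsChanged - optional
        &defragInfo, &stats);
    // Allocations whose memory/offset changed must have their buffers or
    // images recreated and rebound by the application.
*/
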
/** \brief Creates a new VkBuffer, allocates and binds memory for it.

This function automatically creates the buffer, allocates appropriate memory
for it, and binds the buffer to the memory. If any of these operations fail,
buffer and allocation are not created, the returned value is a negative error
code, and *pBuffer and *pAllocation are null.
*/
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/** \brief Destroys a Vulkan buffer and frees the allocated memory.

It is safe to pass null as buffer and/or allocation.
*/
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

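/*
An illustrative sketch (not part of the original header; `allocator` is assumed
valid). Creates a 64 KiB GPU-only vertex buffer together with its memory:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo,
        &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
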
/// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/** \brief Destroys a Vulkan image and frees the allocated memory.

It is safe to pass null as image and/or allocation.
*/
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

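/*
An illustrative sketch (not part of the original header; `allocator` is assumed
valid). Creates a 2D device-local image analogously to the buffer example above:

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent = { 1024, 1024, 1 };
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imageInfo, &allocInfo,
        &image, &allocation, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/
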
#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan
functions via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own
implementation of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32) && !defined(__APPLE__)
    #include <malloc.h> // for aligned_alloc()
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can noticeably slow the program down.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// These number-to-string helpers are used only when VMA_STATS_STRING_ENABLED
// (which enables vmaBuildStatsString and vmaFreeStatsString) is set to 1.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    // 1 = prefer the best-fit strategy when searching free suballocations
    // (choose the smallest suitable free range); 0 = prefer worst fit.
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Define to 1 to make every allocation use its own dedicated VkDeviceMemory
    // block. For debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes. Set to a power of two
    // greater than 1 for debugging purposes only.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin between suballocations, in bytes. Set nonzero for
    // debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Define to 1 to enable a single mutex protecting all entry calls to the
    // library. For debugging purposes only.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to
    // more than 1 for debugging purposes only.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small", in bytes.
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap, in bytes.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to the nearest integer.
// For example: VmaRoundDiv(10, 4) = 3, because 10/4 = 2.5 rounds up to 3.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned iterator points to the found element, if it is present in the
collection, or to the place where a new element with value (key) should be
inserted to keep the collection sorted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

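/*
Behavior matches std::lower_bound. An illustrative sketch (not from the
original header):

    const uint32_t arr[] = { 1, 3, 3, 8 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 4u,
        [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
    // it now points to arr[3] (value 8): the position where 4 would be inserted.
*/
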
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy elements in reverse order, then free the whole array.
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

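/*
An illustrative sketch (not from the original header) of plugging
VmaStlAllocator into a standard container; assumes <vector> is available and
`pCallbacks` is a const VkAllocationCallbacks* (may be null):

    VmaStlAllocator<uint32_t> alloc(pCallbacks);
    std::vector<uint32_t, VmaStlAllocator<uint32_t> > v(alloc);
    v.push_back(42); // element storage is obtained through VmaMalloc/VmaFree
*/
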
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return size_t(it - vector.data());
    }
    else
    {
        return vector.size();
    }
}

// class VmaPoolAllocator

/*
Allocator for objects of type T, using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded,
because the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

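/*
An illustrative sketch of the pool allocator above (not from the original
header; `pCallbacks` is assumed to be a const VkAllocationCallbacks*, may be null):

    VmaPoolAllocator<uint64_t> pool(pCallbacks, 32); // 32 items per block
    uint64_t* item = pool.Alloc(); // O(1) unless a new block must be created
    *item = 123;
    pool.Free(item); // returns the item to its block's free list
*/
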
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block the copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste time returning
    // all items to m_ItemAllocator as free - the allocator is destroyed anyway.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with a subset of the interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

class VmaDeviceMemoryBlock;

struct VmaAllocation_T
{
private:
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    void FreeUserDataString(VmaAllocator hAllocator);
};

3706 
3707 /*
3708 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
3709 returned as an allocated memory block, or free.
3710 */
3711 struct VmaSuballocation
3712 {
3713  VkDeviceSize offset;
3714  VkDeviceSize size;
3715  VmaAllocation hAllocation;
3716  VmaSuballocationType type;
3717 };
3718 
3719 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3720 
3721 // Cost of making one more allocation lost, expressed in equivalent bytes.
3722 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3723 
3724 /*
3725 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3726 
3727 If canMakeOtherLost was false:
3728 - item points to a FREE suballocation.
3729 - itemsToMakeLostCount is 0.
3730 
3731 If canMakeOtherLost was true:
3732 - item points to the first of a sequence of suballocations, which are either FREE,
3733  or point to VmaAllocations that can become lost.
3734 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3735  the requested allocation to succeed.
3736 */
3737 struct VmaAllocationRequest
3738 {
3739  VkDeviceSize offset;
3740  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3741  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3742  VmaSuballocationList::iterator item;
3743  size_t itemsToMakeLostCount;
3744 
3745  VkDeviceSize CalcCost() const
3746  {
3747  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3748  }
3749 };
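
// Worked example (illustrative numbers): suppose a candidate request overlaps two
// lost-able allocations of 262144 bytes total (sumItemSize = 262144) and needs both
// of them to become lost (itemsToMakeLostCount = 2). With
// VMA_LOST_ALLOCATION_COST = 1048576, the cost is
// 262144 + 2 * 1048576 = 2359296 "equivalent bytes". A competing request that fits
// entirely in a FREE suballocation has sumItemSize = 0 and itemsToMakeLostCount = 0,
// so its cost is 0 and it wins the comparison in CreateAllocationRequest().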
3750 
3751 /*
3752 Data structure used for bookkeeping of allocations and unused ranges of memory
3753 in a single VkDeviceMemory block.
3754 */
3755 class VmaBlockMetadata
3756 {
3757 public:
3758  VmaBlockMetadata(VmaAllocator hAllocator);
3759  ~VmaBlockMetadata();
3760  void Init(VkDeviceSize size);
3761 
3762  // Validates all data structures inside this object. If not valid, returns false.
3763  bool Validate() const;
3764  VkDeviceSize GetSize() const { return m_Size; }
3765  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3766  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3767  VkDeviceSize GetUnusedRangeSizeMax() const;
3768  // Returns true if this block is empty - contains only a single free suballocation.
3769  bool IsEmpty() const;
3770 
3771  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3772  void AddPoolStats(VmaPoolStats& inoutStats) const;
3773 
3774 #if VMA_STATS_STRING_ENABLED
3775  void PrintDetailedMap(class VmaJsonWriter& json) const;
3776 #endif
3777 
3778  // Creates a trivial request for the case when the block is empty.
3779  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3780 
3781  // Tries to find a place for suballocation with given parameters inside this block.
3782  // If succeeded, fills pAllocationRequest and returns true.
3783  // If failed, returns false.
3784  bool CreateAllocationRequest(
3785  uint32_t currentFrameIndex,
3786  uint32_t frameInUseCount,
3787  VkDeviceSize bufferImageGranularity,
3788  VkDeviceSize allocSize,
3789  VkDeviceSize allocAlignment,
3790  VmaSuballocationType allocType,
3791  bool canMakeOtherLost,
3792  VmaAllocationRequest* pAllocationRequest);
3793 
3794  bool MakeRequestedAllocationsLost(
3795  uint32_t currentFrameIndex,
3796  uint32_t frameInUseCount,
3797  VmaAllocationRequest* pAllocationRequest);
3798 
3799  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3800 
3801  // Makes actual allocation based on request. Request must already be checked and valid.
3802  void Alloc(
3803  const VmaAllocationRequest& request,
3804  VmaSuballocationType type,
3805  VkDeviceSize allocSize,
3806  VmaAllocation hAllocation);
3807 
3808  // Frees suballocation assigned to given memory region.
3809  void Free(const VmaAllocation allocation);
3810  void FreeAtOffset(VkDeviceSize offset);
3811 
3812 private:
3813  VkDeviceSize m_Size;
3814  uint32_t m_FreeCount;
3815  VkDeviceSize m_SumFreeSize;
3816  VmaSuballocationList m_Suballocations;
3817  // Suballocations that are free and have size greater than a certain threshold.
3818  // Sorted by size, ascending.
3819  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3820 
3821  bool ValidateFreeSuballocationList() const;
3822 
3823  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
3824  // If yes, fills pOffset and returns true. If not, returns false.
3825  bool CheckAllocation(
3826  uint32_t currentFrameIndex,
3827  uint32_t frameInUseCount,
3828  VkDeviceSize bufferImageGranularity,
3829  VkDeviceSize allocSize,
3830  VkDeviceSize allocAlignment,
3831  VmaSuballocationType allocType,
3832  VmaSuballocationList::const_iterator suballocItem,
3833  bool canMakeOtherLost,
3834  VkDeviceSize* pOffset,
3835  size_t* itemsToMakeLostCount,
3836  VkDeviceSize* pSumFreeSize,
3837  VkDeviceSize* pSumItemSize) const;
3838  // Given a free suballocation, merges it with the following one, which must also be free.
3839  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3840  // Releases given suballocation, making it free.
3841  // Merges it with adjacent free suballocations if applicable.
3842  // Returns iterator to new free suballocation at this place.
3843  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3844  // Given a free suballocation, inserts it into the sorted list
3845  // m_FreeSuballocationsBySize if it is suitable (large enough to register).
3846  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3847  // Given a free suballocation, removes it from the sorted list
3848  // m_FreeSuballocationsBySize if it is suitable (i.e. was registered there).
3849  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3850 };
3851 
3852 // Helper class that represents mapped memory. Synchronized internally.
3853 class VmaDeviceMemoryMapping
3854 {
3855 public:
3856  VmaDeviceMemoryMapping();
3857  ~VmaDeviceMemoryMapping();
3858 
3859  void* GetMappedData() const { return m_pMappedData; }
3860 
3861  // ppData can be null.
3862  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3863  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3864 
3865 private:
3866  VMA_MUTEX m_Mutex;
3867  uint32_t m_MapCount;
3868  void* m_pMappedData;
3869 };
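
// A minimal sketch of the reference-counted mapping idea behind Map()/Unmap()
// above (an illustration, not the definitive implementation, which appears
// further down in this file): vkMapMemory is called only on the 0 -> 1
// transition and vkUnmapMemory only on the 1 -> 0 transition; every other call
// just adjusts the counter under the mutex. Assumes the VmaMutexLock RAII
// helper defined earlier in this file.
#if 0
static VkResult ExampleRefCountedMap(
    VmaAllocator hAllocator, VkDeviceMemory hMemory, VMA_MUTEX& mutex,
    uint32_t& mapCount, void*& pMappedData, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }
    VmaMutexLock lock(mutex, hAllocator->m_UseMutex);
    if(mapCount != 0)
    {
        // Already mapped: just bump the reference counter.
        mapCount += count;
        VMA_ASSERT(pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = pMappedData;
        }
        return VK_SUCCESS;
    }
    // First mapping: call into Vulkan.
    VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
        hAllocator->m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
    if(result == VK_SUCCESS)
    {
        if(ppData != VMA_NULL)
        {
            *ppData = pMappedData;
        }
        mapCount = count;
    }
    return result;
}
#endif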
3870 
3871 /*
3872 Represents a single block of device memory (`VkDeviceMemory`) with all the
3873 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3874 
3875 Thread-safety: This class must be externally synchronized.
3876 */
3877 class VmaDeviceMemoryBlock
3878 {
3879 public:
3880  uint32_t m_MemoryTypeIndex;
3881  VkDeviceMemory m_hMemory;
3882  VmaDeviceMemoryMapping m_Mapping;
3883  VmaBlockMetadata m_Metadata;
3884 
3885  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3886 
3887  ~VmaDeviceMemoryBlock()
3888  {
3889  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3890  }
3891 
3892  // Always call after construction.
3893  void Init(
3894  uint32_t newMemoryTypeIndex,
3895  VkDeviceMemory newMemory,
3896  VkDeviceSize newSize);
3897  // Always call before destruction.
3898  void Destroy(VmaAllocator allocator);
3899 
3900  // Validates all data structures inside this object. If not valid, returns false.
3901  bool Validate() const;
3902 
3903  // ppData can be null.
3904  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3905  void Unmap(VmaAllocator hAllocator, uint32_t count);
3906 };
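
// Lifecycle sketch (illustrative, assuming the vma_new/vma_delete helpers used
// throughout this file): Init() must follow construction and Destroy() must
// precede deletion, so the assert in ~VmaDeviceMemoryBlock() sees
// m_hMemory == VK_NULL_HANDLE.
#if 0
static void ExampleBlockLifecycle(VmaAllocator hAllocator, uint32_t memTypeIndex, VkDeviceSize size)
{
    VmaDeviceMemoryBlock* pBlock = vma_new(hAllocator, VmaDeviceMemoryBlock)(hAllocator);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    if(hAllocator->AllocateVulkanMemory(&allocInfo, &hMemory) == VK_SUCCESS)
    {
        pBlock->Init(memTypeIndex, hMemory, size);
        // ... suballocate from pBlock->m_Metadata ...
        pBlock->Destroy(hAllocator); // Frees hMemory and resets the handle.
    }
    vma_delete(hAllocator, pBlock);
}
#endif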
3907 
3908 struct VmaPointerLess
3909 {
3910  bool operator()(const void* lhs, const void* rhs) const
3911  {
3912  return lhs < rhs;
3913  }
3914 };
3915 
3916 class VmaDefragmentator;
3917 
3918 /*
3919 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3920 Vulkan memory type.
3921 
3922 Synchronized internally with a mutex.
3923 */
3924 struct VmaBlockVector
3925 {
3926  VmaBlockVector(
3927  VmaAllocator hAllocator,
3928  uint32_t memoryTypeIndex,
3929  VkDeviceSize preferredBlockSize,
3930  size_t minBlockCount,
3931  size_t maxBlockCount,
3932  VkDeviceSize bufferImageGranularity,
3933  uint32_t frameInUseCount,
3934  bool isCustomPool);
3935  ~VmaBlockVector();
3936 
3937  VkResult CreateMinBlocks();
3938 
3939  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3940  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3941  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3942  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3943 
3944  void GetPoolStats(VmaPoolStats* pStats);
3945 
3946  bool IsEmpty() const { return m_Blocks.empty(); }
3947 
3948  VkResult Allocate(
3949  VmaPool hCurrentPool,
3950  uint32_t currentFrameIndex,
3951  const VkMemoryRequirements& vkMemReq,
3952  const VmaAllocationCreateInfo& createInfo,
3953  VmaSuballocationType suballocType,
3954  VmaAllocation* pAllocation);
3955 
3956  void Free(
3957  VmaAllocation hAllocation);
3958 
3959  // Adds statistics of this BlockVector to pStats.
3960  void AddStats(VmaStats* pStats);
3961 
3962 #if VMA_STATS_STRING_ENABLED
3963  void PrintDetailedMap(class VmaJsonWriter& json);
3964 #endif
3965 
3966  void MakePoolAllocationsLost(
3967  uint32_t currentFrameIndex,
3968  size_t* pLostAllocationCount);
3969 
3970  VmaDefragmentator* EnsureDefragmentator(
3971  VmaAllocator hAllocator,
3972  uint32_t currentFrameIndex);
3973 
3974  VkResult Defragment(
3975  VmaDefragmentationStats* pDefragmentationStats,
3976  VkDeviceSize& maxBytesToMove,
3977  uint32_t& maxAllocationsToMove);
3978 
3979  void DestroyDefragmentator();
3980 
3981 private:
3982  friend class VmaDefragmentator;
3983 
3984  const VmaAllocator m_hAllocator;
3985  const uint32_t m_MemoryTypeIndex;
3986  const VkDeviceSize m_PreferredBlockSize;
3987  const size_t m_MinBlockCount;
3988  const size_t m_MaxBlockCount;
3989  const VkDeviceSize m_BufferImageGranularity;
3990  const uint32_t m_FrameInUseCount;
3991  const bool m_IsCustomPool;
3992  VMA_MUTEX m_Mutex;
3993  // Incrementally sorted by sumFreeSize, ascending.
3994  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3995  /* There can be at most one block that is completely empty - a
3996  hysteresis to avoid the pessimistic case of alternating creation and
3997  destruction of a VkDeviceMemory. */
3998  bool m_HasEmptyBlock;
3999  VmaDefragmentator* m_pDefragmentator;
4000 
4001  size_t CalcMaxBlockSize() const;
4002 
4003  // Finds and removes given block from vector.
4004  void Remove(VmaDeviceMemoryBlock* pBlock);
4005 
4006  // Performs a single step in sorting m_Blocks. They may not be fully sorted
4007  // after this call.
4008  void IncrementallySortBlocks();
4009 
4010  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4011 };
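
// A minimal sketch of the "single step" behind IncrementallySortBlocks()
// (illustrative; assumes the VMA_SWAP helper defined earlier in this file):
// one bubble-sort pass that stops at the first swap keeps m_Blocks roughly
// sorted by sumFreeSize at O(n) cost per call instead of a full sort.
#if 0
static void ExampleIncrementalSortStep(
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> >& blocks)
{
    for(size_t i = 1; i < blocks.size(); ++i)
    {
        if(blocks[i - 1]->m_Metadata.GetSumFreeSize() > blocks[i]->m_Metadata.GetSumFreeSize())
        {
            VMA_SWAP(blocks[i - 1], blocks[i]); // Fix one out-of-order pair, then stop.
            return;
        }
    }
}
#endif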
4012 
4013 struct VmaPool_T
4014 {
4015 public:
4016  VmaBlockVector m_BlockVector;
4017 
4018  // Takes ownership.
4019  VmaPool_T(
4020  VmaAllocator hAllocator,
4021  const VmaPoolCreateInfo& createInfo);
4022  ~VmaPool_T();
4023 
4024  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4025 
4026 #if VMA_STATS_STRING_ENABLED
4027  //void PrintDetailedMap(class VmaStringBuilder& sb);
4028 #endif
4029 };
4030 
4031 class VmaDefragmentator
4032 {
4033  const VmaAllocator m_hAllocator;
4034  VmaBlockVector* const m_pBlockVector;
4035  uint32_t m_CurrentFrameIndex;
4036  VkDeviceSize m_BytesMoved;
4037  uint32_t m_AllocationsMoved;
4038 
4039  struct AllocationInfo
4040  {
4041  VmaAllocation m_hAllocation;
4042  VkBool32* m_pChanged;
4043 
4044  AllocationInfo() :
4045  m_hAllocation(VK_NULL_HANDLE),
4046  m_pChanged(VMA_NULL)
4047  {
4048  }
4049  };
4050 
4051  struct AllocationInfoSizeGreater
4052  {
4053  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4054  {
4055  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4056  }
4057  };
4058 
4059  // Used between AddAllocation and Defragment.
4060  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4061 
4062  struct BlockInfo
4063  {
4064  VmaDeviceMemoryBlock* m_pBlock;
4065  bool m_HasNonMovableAllocations;
4066  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4067 
4068  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4069  m_pBlock(VMA_NULL),
4070  m_HasNonMovableAllocations(true),
4071  m_Allocations(pAllocationCallbacks),
4072  m_pMappedDataForDefragmentation(VMA_NULL)
4073  {
4074  }
4075 
4076  void CalcHasNonMovableAllocations()
4077  {
4078  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4079  const size_t defragmentAllocCount = m_Allocations.size();
4080  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4081  }
4082 
4083  void SortAllocationsBySizeDescecnding()
4084  {
4085  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4086  }
4087 
4088  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4089  void Unmap(VmaAllocator hAllocator);
4090 
4091  private:
4092  // Not null if mapped for defragmentation only, not originally mapped.
4093  void* m_pMappedDataForDefragmentation;
4094  };
4095 
4096  struct BlockPointerLess
4097  {
4098  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4099  {
4100  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4101  }
4102  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4103  {
4104  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4105  }
4106  };
4107 
4108  // 1. Blocks with some non-movable allocations go first.
4109  // 2. Blocks with smaller sumFreeSize go first.
4110  struct BlockInfoCompareMoveDestination
4111  {
4112  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4113  {
4114  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4115  {
4116  return true;
4117  }
4118  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4119  {
4120  return false;
4121  }
4122  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4123  {
4124  return true;
4125  }
4126  return false;
4127  }
4128  };
4129 
4130  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4131  BlockInfoVector m_Blocks;
4132 
4133  VkResult DefragmentRound(
4134  VkDeviceSize maxBytesToMove,
4135  uint32_t maxAllocationsToMove);
4136 
4137  static bool MoveMakesSense(
4138  size_t dstBlockIndex, VkDeviceSize dstOffset,
4139  size_t srcBlockIndex, VkDeviceSize srcOffset);
4140 
4141 public:
4142  VmaDefragmentator(
4143  VmaAllocator hAllocator,
4144  VmaBlockVector* pBlockVector,
4145  uint32_t currentFrameIndex);
4146 
4147  ~VmaDefragmentator();
4148 
4149  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4150  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4151 
4152  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4153 
4154  VkResult Defragment(
4155  VkDeviceSize maxBytesToMove,
4156  uint32_t maxAllocationsToMove);
4157 };
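
// A hedged usage sketch (illustrative): this is roughly how VmaBlockVector
// drives the defragmentator on behalf of the public vmaDefragment() entry
// point.
#if 0
static void ExampleDefragment(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    VmaAllocation* pAllocations,
    size_t allocationCount)
{
    VmaDefragmentator defragmentator(hAllocator, pBlockVector, currentFrameIndex);
    for(size_t i = 0; i < allocationCount; ++i)
    {
        defragmentator.AddAllocation(pAllocations[i], VMA_NULL); // pChanged is optional.
    }
    // Move at most 64 MiB and 1000 allocations in this round.
    VkResult result = defragmentator.Defragment(64ull * 1024 * 1024, 1000);
    if(result == VK_SUCCESS)
    {
        VkDeviceSize bytesMoved = defragmentator.GetBytesMoved();
        uint32_t allocationsMoved = defragmentator.GetAllocationsMoved();
        (void)bytesMoved; (void)allocationsMoved; // Reported via VmaDefragmentationStats.
    }
}
#endif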
4158 
4159 // Main allocator object.
4160 struct VmaAllocator_T
4161 {
4162  bool m_UseMutex;
4163  bool m_UseKhrDedicatedAllocation;
4164  VkDevice m_hDevice;
4165  bool m_AllocationCallbacksSpecified;
4166  VkAllocationCallbacks m_AllocationCallbacks;
4167  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4168 
4169  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4170  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4171  VMA_MUTEX m_HeapSizeLimitMutex;
4172 
4173  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4174  VkPhysicalDeviceMemoryProperties m_MemProps;
4175 
4176  // Default pools.
4177  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4178 
4179  // Each vector is sorted by memory (handle value).
4180  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4181  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4182  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4183 
4184  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4185  ~VmaAllocator_T();
4186 
4187  const VkAllocationCallbacks* GetAllocationCallbacks() const
4188  {
4189  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4190  }
4191  const VmaVulkanFunctions& GetVulkanFunctions() const
4192  {
4193  return m_VulkanFunctions;
4194  }
4195 
4196  VkDeviceSize GetBufferImageGranularity() const
4197  {
4198  return VMA_MAX(
4199  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4200  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4201  }
4202 
4203  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4204  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4205 
4206  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4207  {
4208  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4209  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4210  }
4211 
4212  void GetBufferMemoryRequirements(
4213  VkBuffer hBuffer,
4214  VkMemoryRequirements& memReq,
4215  bool& requiresDedicatedAllocation,
4216  bool& prefersDedicatedAllocation) const;
4217  void GetImageMemoryRequirements(
4218  VkImage hImage,
4219  VkMemoryRequirements& memReq,
4220  bool& requiresDedicatedAllocation,
4221  bool& prefersDedicatedAllocation) const;
4222 
4223  // Main allocation function.
4224  VkResult AllocateMemory(
4225  const VkMemoryRequirements& vkMemReq,
4226  bool requiresDedicatedAllocation,
4227  bool prefersDedicatedAllocation,
4228  VkBuffer dedicatedBuffer,
4229  VkImage dedicatedImage,
4230  const VmaAllocationCreateInfo& createInfo,
4231  VmaSuballocationType suballocType,
4232  VmaAllocation* pAllocation);
4233 
4234  // Main deallocation function.
4235  void FreeMemory(const VmaAllocation allocation);
4236 
4237  void CalculateStats(VmaStats* pStats);
4238 
4239 #if VMA_STATS_STRING_ENABLED
4240  void PrintDetailedMap(class VmaJsonWriter& json);
4241 #endif
4242 
4243  VkResult Defragment(
4244  VmaAllocation* pAllocations,
4245  size_t allocationCount,
4246  VkBool32* pAllocationsChanged,
4247  const VmaDefragmentationInfo* pDefragmentationInfo,
4248  VmaDefragmentationStats* pDefragmentationStats);
4249 
4250  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4251  bool TouchAllocation(VmaAllocation hAllocation);
4252 
4253  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4254  void DestroyPool(VmaPool pool);
4255  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4256 
4257  void SetCurrentFrameIndex(uint32_t frameIndex);
4258 
4259  void MakePoolAllocationsLost(
4260  VmaPool hPool,
4261  size_t* pLostAllocationCount);
4262 
4263  void CreateLostAllocation(VmaAllocation* pAllocation);
4264 
4265  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4266  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4267 
4268  VkResult Map(VmaAllocation hAllocation, void** ppData);
4269  void Unmap(VmaAllocation hAllocation);
4270 
4271 private:
4272  VkDeviceSize m_PreferredLargeHeapBlockSize;
4273 
4274  VkPhysicalDevice m_PhysicalDevice;
4275  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4276 
4277  VMA_MUTEX m_PoolsMutex;
4278  // Protected by m_PoolsMutex. Sorted by pointer value.
4279  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4280 
4281  VmaVulkanFunctions m_VulkanFunctions;
4282 
4283  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4284 
4285  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4286 
4287  VkResult AllocateMemoryOfType(
4288  const VkMemoryRequirements& vkMemReq,
4289  bool dedicatedAllocation,
4290  VkBuffer dedicatedBuffer,
4291  VkImage dedicatedImage,
4292  const VmaAllocationCreateInfo& createInfo,
4293  uint32_t memTypeIndex,
4294  VmaSuballocationType suballocType,
4295  VmaAllocation* pAllocation);
4296 
4297  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4298  VkResult AllocateDedicatedMemory(
4299  VkDeviceSize size,
4300  VmaSuballocationType suballocType,
4301  uint32_t memTypeIndex,
4302  bool map,
4303  bool isUserDataString,
4304  void* pUserData,
4305  VkBuffer dedicatedBuffer,
4306  VkImage dedicatedImage,
4307  VmaAllocation* pAllocation);
4308 
4309  // Frees the given dedicated allocation: unregisters it and releases its VkDeviceMemory.
4310  void FreeDedicatedMemory(VmaAllocation allocation);
4311 };
4312 
4314 // Memory allocation #2 after VmaAllocator_T definition
4315 
4316 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4317 {
4318  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4319 }
4320 
4321 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4322 {
4323  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4324 }
4325 
4326 template<typename T>
4327 static T* VmaAllocate(VmaAllocator hAllocator)
4328 {
4329  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4330 }
4331 
4332 template<typename T>
4333 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4334 {
4335  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4336 }
4337 
4338 template<typename T>
4339 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4340 {
4341  if(ptr != VMA_NULL)
4342  {
4343  ptr->~T();
4344  VmaFree(hAllocator, ptr);
4345  }
4346 }
4347 
4348 template<typename T>
4349 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4350 {
4351  if(ptr != VMA_NULL)
4352  {
4353  for(size_t i = count; i--; )
4354  ptr[i].~T();
4355  VmaFree(hAllocator, ptr);
4356  }
4357 }
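
// Example pairing of these helpers (a sketch; real call sites use the
// vma_new/vma_new_array macros defined elsewhere in this file): raw aligned
// memory comes from VmaAllocate<T>, the object is constructed with placement
// new, and vma_delete() runs the destructor before returning the memory.
#if 0
struct ExamplePayload
{
    int value;
    ExamplePayload(int v) : value(v) { }
};

static void ExampleAllocateAndDelete(VmaAllocator hAllocator)
{
    ExamplePayload* p = new(VmaAllocate<ExamplePayload>(hAllocator)) ExamplePayload(42);
    // ... use p ...
    vma_delete(hAllocator, p); // ~ExamplePayload() followed by VmaFree().
}
#endif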
4358 
4360 // VmaStringBuilder
4361 
4362 #if VMA_STATS_STRING_ENABLED
4363 
4364 class VmaStringBuilder
4365 {
4366 public:
4367  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4368  size_t GetLength() const { return m_Data.size(); }
4369  const char* GetData() const { return m_Data.data(); }
4370 
4371  void Add(char ch) { m_Data.push_back(ch); }
4372  void Add(const char* pStr);
4373  void AddNewLine() { Add('\n'); }
4374  void AddNumber(uint32_t num);
4375  void AddNumber(uint64_t num);
4376  void AddPointer(const void* ptr);
4377 
4378 private:
4379  VmaVector< char, VmaStlAllocator<char> > m_Data;
4380 };
4381 
4382 void VmaStringBuilder::Add(const char* pStr)
4383 {
4384  const size_t strLen = strlen(pStr);
4385  if(strLen > 0)
4386  {
4387  const size_t oldCount = m_Data.size();
4388  m_Data.resize(oldCount + strLen);
4389  memcpy(m_Data.data() + oldCount, pStr, strLen);
4390  }
4391 }
4392 
4393 void VmaStringBuilder::AddNumber(uint32_t num)
4394 {
4395  char buf[11];
4396  VmaUint32ToStr(buf, sizeof(buf), num);
4397  Add(buf);
4398 }
4399 
4400 void VmaStringBuilder::AddNumber(uint64_t num)
4401 {
4402  char buf[21];
4403  VmaUint64ToStr(buf, sizeof(buf), num);
4404  Add(buf);
4405 }
4406 
4407 void VmaStringBuilder::AddPointer(const void* ptr)
4408 {
4409  char buf[21];
4410  VmaPtrToStr(buf, sizeof(buf), ptr);
4411  Add(buf);
4412 }
4413 
4414 #endif // #if VMA_STATS_STRING_ENABLED
4415 
4417 // VmaJsonWriter
4418 
4419 #if VMA_STATS_STRING_ENABLED
4420 
4421 class VmaJsonWriter
4422 {
4423 public:
4424  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4425  ~VmaJsonWriter();
4426 
4427  void BeginObject(bool singleLine = false);
4428  void EndObject();
4429 
4430  void BeginArray(bool singleLine = false);
4431  void EndArray();
4432 
4433  void WriteString(const char* pStr);
4434  void BeginString(const char* pStr = VMA_NULL);
4435  void ContinueString(const char* pStr);
4436  void ContinueString(uint32_t n);
4437  void ContinueString(uint64_t n);
4438  void ContinueString_Pointer(const void* ptr);
4439  void EndString(const char* pStr = VMA_NULL);
4440 
4441  void WriteNumber(uint32_t n);
4442  void WriteNumber(uint64_t n);
4443  void WriteBool(bool b);
4444  void WriteNull();
4445 
4446 private:
4447  static const char* const INDENT;
4448 
4449  enum COLLECTION_TYPE
4450  {
4451  COLLECTION_TYPE_OBJECT,
4452  COLLECTION_TYPE_ARRAY,
4453  };
4454  struct StackItem
4455  {
4456  COLLECTION_TYPE type;
4457  uint32_t valueCount;
4458  bool singleLineMode;
4459  };
4460 
4461  VmaStringBuilder& m_SB;
4462  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4463  bool m_InsideString;
4464 
4465  void BeginValue(bool isString);
4466  void WriteIndent(bool oneLess = false);
4467 };
4468 
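// A brief usage sketch (illustrative): keys and values must alternate inside an
// object, which is exactly what the valueCount parity check in BeginValue()
// enforces below.
#if 0
static void ExampleWriteJson(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Size");   // Key (a string is required here).
    json.WriteNumber(256u);     // Value.
    json.WriteString("Mapped"); // Key.
    json.WriteBool(true);       // Value.
    json.EndObject();
    // sb.GetData() now holds roughly: {\n "Size": 256,\n "Mapped": true\n}
}
#endif
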
4469 const char* const VmaJsonWriter::INDENT = " ";
4470 
4471 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4472  m_SB(sb),
4473  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4474  m_InsideString(false)
4475 {
4476 }
4477 
4478 VmaJsonWriter::~VmaJsonWriter()
4479 {
4480  VMA_ASSERT(!m_InsideString);
4481  VMA_ASSERT(m_Stack.empty());
4482 }
4483 
4484 void VmaJsonWriter::BeginObject(bool singleLine)
4485 {
4486  VMA_ASSERT(!m_InsideString);
4487 
4488  BeginValue(false);
4489  m_SB.Add('{');
4490 
4491  StackItem item;
4492  item.type = COLLECTION_TYPE_OBJECT;
4493  item.valueCount = 0;
4494  item.singleLineMode = singleLine;
4495  m_Stack.push_back(item);
4496 }
4497 
4498 void VmaJsonWriter::EndObject()
4499 {
4500  VMA_ASSERT(!m_InsideString);
4501 
4502  WriteIndent(true);
4503  m_SB.Add('}');
4504 
4505  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4506  m_Stack.pop_back();
4507 }
4508 
4509 void VmaJsonWriter::BeginArray(bool singleLine)
4510 {
4511  VMA_ASSERT(!m_InsideString);
4512 
4513  BeginValue(false);
4514  m_SB.Add('[');
4515 
4516  StackItem item;
4517  item.type = COLLECTION_TYPE_ARRAY;
4518  item.valueCount = 0;
4519  item.singleLineMode = singleLine;
4520  m_Stack.push_back(item);
4521 }
4522 
4523 void VmaJsonWriter::EndArray()
4524 {
4525  VMA_ASSERT(!m_InsideString);
4526 
4527  WriteIndent(true);
4528  m_SB.Add(']');
4529 
4530  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4531  m_Stack.pop_back();
4532 }
4533 
4534 void VmaJsonWriter::WriteString(const char* pStr)
4535 {
4536  BeginString(pStr);
4537  EndString();
4538 }
4539 
4540 void VmaJsonWriter::BeginString(const char* pStr)
4541 {
4542  VMA_ASSERT(!m_InsideString);
4543 
4544  BeginValue(true);
4545  m_SB.Add('"');
4546  m_InsideString = true;
4547  if(pStr != VMA_NULL && pStr[0] != '\0')
4548  {
4549  ContinueString(pStr);
4550  }
4551 }
4552 
4553 void VmaJsonWriter::ContinueString(const char* pStr)
4554 {
4555  VMA_ASSERT(m_InsideString);
4556 
4557  const size_t strLen = strlen(pStr);
4558  for(size_t i = 0; i < strLen; ++i)
4559  {
4560  char ch = pStr[i];
4561  if(ch == '\\')
4562  {
4563  m_SB.Add("\\\\");
4564  }
4565  else if(ch == '"')
4566  {
4567  m_SB.Add("\\\"");
4568  }
4569  else if(ch >= 32)
4570  {
4571  m_SB.Add(ch);
4572  }
4573  else switch(ch)
4574  {
4575  case '\b':
4576  m_SB.Add("\\b");
4577  break;
4578  case '\f':
4579  m_SB.Add("\\f");
4580  break;
4581  case '\n':
4582  m_SB.Add("\\n");
4583  break;
4584  case '\r':
4585  m_SB.Add("\\r");
4586  break;
4587  case '\t':
4588  m_SB.Add("\\t");
4589  break;
4590  default:
4591  VMA_ASSERT(0 && "Character not currently supported.");
4592  break;
4593  }
4594  }
4595 }
4596 
4597 void VmaJsonWriter::ContinueString(uint32_t n)
4598 {
4599  VMA_ASSERT(m_InsideString);
4600  m_SB.AddNumber(n);
4601 }
4602 
4603 void VmaJsonWriter::ContinueString(uint64_t n)
4604 {
4605  VMA_ASSERT(m_InsideString);
4606  m_SB.AddNumber(n);
4607 }
4608 
4609 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4610 {
4611  VMA_ASSERT(m_InsideString);
4612  m_SB.AddPointer(ptr);
4613 }
4614 
4615 void VmaJsonWriter::EndString(const char* pStr)
4616 {
4617  VMA_ASSERT(m_InsideString);
4618  if(pStr != VMA_NULL && pStr[0] != '\0')
4619  {
4620  ContinueString(pStr);
4621  }
4622  m_SB.Add('"');
4623  m_InsideString = false;
4624 }
4625 
4626 void VmaJsonWriter::WriteNumber(uint32_t n)
4627 {
4628  VMA_ASSERT(!m_InsideString);
4629  BeginValue(false);
4630  m_SB.AddNumber(n);
4631 }
4632 
4633 void VmaJsonWriter::WriteNumber(uint64_t n)
4634 {
4635  VMA_ASSERT(!m_InsideString);
4636  BeginValue(false);
4637  m_SB.AddNumber(n);
4638 }
4639 
4640 void VmaJsonWriter::WriteBool(bool b)
4641 {
4642  VMA_ASSERT(!m_InsideString);
4643  BeginValue(false);
4644  m_SB.Add(b ? "true" : "false");
4645 }
4646 
4647 void VmaJsonWriter::WriteNull()
4648 {
4649  VMA_ASSERT(!m_InsideString);
4650  BeginValue(false);
4651  m_SB.Add("null");
4652 }
4653 
4654 void VmaJsonWriter::BeginValue(bool isString)
4655 {
4656  if(!m_Stack.empty())
4657  {
4658  StackItem& currItem = m_Stack.back();
4659  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4660  currItem.valueCount % 2 == 0)
4661  {
4662  VMA_ASSERT(isString);
4663  }
4664 
4665  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4666  currItem.valueCount % 2 != 0)
4667  {
4668  m_SB.Add(": ");
4669  }
4670  else if(currItem.valueCount > 0)
4671  {
4672  m_SB.Add(", ");
4673  WriteIndent();
4674  }
4675  else
4676  {
4677  WriteIndent();
4678  }
4679  ++currItem.valueCount;
4680  }
4681 }
4682 
4683 void VmaJsonWriter::WriteIndent(bool oneLess)
4684 {
4685  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4686  {
4687  m_SB.AddNewLine();
4688 
4689  size_t count = m_Stack.size();
4690  if(count > 0 && oneLess)
4691  {
4692  --count;
4693  }
4694  for(size_t i = 0; i < count; ++i)
4695  {
4696  m_SB.Add(INDENT);
4697  }
4698  }
4699 }
4700 
4701 #endif // #if VMA_STATS_STRING_ENABLED
4702 
4704 
4705 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4706 {
4707  if(IsUserDataString())
4708  {
4709  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4710 
4711  FreeUserDataString(hAllocator);
4712 
4713  if(pUserData != VMA_NULL)
4714  {
4715  const char* const newStrSrc = (char*)pUserData;
4716  const size_t newStrLen = strlen(newStrSrc);
4717  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4718  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4719  m_pUserData = newStrDst;
4720  }
4721  }
4722  else
4723  {
4724  m_pUserData = pUserData;
4725  }
4726 }
4727 
4728 void VmaAllocation_T::ChangeBlockAllocation(
4729  VmaAllocator hAllocator,
4730  VmaDeviceMemoryBlock* block,
4731  VkDeviceSize offset)
4732 {
4733  VMA_ASSERT(block != VMA_NULL);
4734  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4735 
4736  // Move mapping reference counter from old block to new block.
4737  if(block != m_BlockAllocation.m_Block)
4738  {
4739  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4740  if(IsPersistentMap())
4741  ++mapRefCount;
4742  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4743  block->Map(hAllocator, mapRefCount, VMA_NULL);
4744  }
4745 
4746  m_BlockAllocation.m_Block = block;
4747  m_BlockAllocation.m_Offset = offset;
4748 }
4749 
4750 VkDeviceSize VmaAllocation_T::GetOffset() const
4751 {
4752  switch(m_Type)
4753  {
4754  case ALLOCATION_TYPE_BLOCK:
4755  return m_BlockAllocation.m_Offset;
4756  case ALLOCATION_TYPE_DEDICATED:
4757  return 0;
4758  default:
4759  VMA_ASSERT(0);
4760  return 0;
4761  }
4762 }
4763 
4764 VkDeviceMemory VmaAllocation_T::GetMemory() const
4765 {
4766  switch(m_Type)
4767  {
4768  case ALLOCATION_TYPE_BLOCK:
4769  return m_BlockAllocation.m_Block->m_hMemory;
4770  case ALLOCATION_TYPE_DEDICATED:
4771  return m_DedicatedAllocation.m_hMemory;
4772  default:
4773  VMA_ASSERT(0);
4774  return VK_NULL_HANDLE;
4775  }
4776 }
4777 
4778 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4779 {
4780  switch(m_Type)
4781  {
4782  case ALLOCATION_TYPE_BLOCK:
4783  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4784  case ALLOCATION_TYPE_DEDICATED:
4785  return m_DedicatedAllocation.m_MemoryTypeIndex;
4786  default:
4787  VMA_ASSERT(0);
4788  return UINT32_MAX;
4789  }
4790 }
4791 
4792 void* VmaAllocation_T::GetMappedData() const
4793 {
4794  switch(m_Type)
4795  {
4796  case ALLOCATION_TYPE_BLOCK:
4797  if(m_MapCount != 0)
4798  {
4799  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4800  VMA_ASSERT(pBlockData != VMA_NULL);
4801  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4802  }
4803  else
4804  {
4805  return VMA_NULL;
4806  }
4807  break;
4808  case ALLOCATION_TYPE_DEDICATED:
4809  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4810  return m_DedicatedAllocation.m_pMappedData;
4811  default:
4812  VMA_ASSERT(0);
4813  return VMA_NULL;
4814  }
4815 }
4816 
4817 bool VmaAllocation_T::CanBecomeLost() const
4818 {
4819  switch(m_Type)
4820  {
4821  case ALLOCATION_TYPE_BLOCK:
4822  return m_BlockAllocation.m_CanBecomeLost;
4823  case ALLOCATION_TYPE_DEDICATED:
4824  return false;
4825  default:
4826  VMA_ASSERT(0);
4827  return false;
4828  }
4829 }
4830 
4831 VmaPool VmaAllocation_T::GetPool() const
4832 {
4833  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4834  return m_BlockAllocation.m_hPool;
4835 }
4836 
4837 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4838 {
4839  VMA_ASSERT(CanBecomeLost());
4840 
4841  /*
4842  Warning: This is a carefully designed algorithm.
4843  Do not modify unless you really know what you're doing :)
4844  */
4845  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4846  for(;;)
4847  {
4848  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4849  {
4850  VMA_ASSERT(0);
4851  return false;
4852  }
4853  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4854  {
4855  return false;
4856  }
4857  else // Last use time earlier than current time.
4858  {
4859  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4860  {
4861  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4862  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4863  return true;
4864  }
4865  }
4866  }
4867 }
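
// Worked example (illustrative numbers): with currentFrameIndex = 100 and
// frameInUseCount = 2, an allocation whose lastUseFrameIndex is 97 satisfies
// 97 + 2 = 99 < 100, so the compare-exchange sets it to VMA_FRAME_INDEX_LOST and
// MakeLost() returns true. With lastUseFrameIndex = 98, 98 + 2 = 100 >= 100: the
// allocation may still be in use by in-flight frames, so it is kept and the
// function returns false. The compare_exchange_weak loop retries when another
// thread touches the allocation (advancing its frame index) between the load
// and the exchange.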
4868 
4869 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4870 {
4871  VMA_ASSERT(IsUserDataString());
4872  if(m_pUserData != VMA_NULL)
4873  {
4874  char* const oldStr = (char*)m_pUserData;
4875  const size_t oldStrLen = strlen(oldStr);
4876  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4877  m_pUserData = VMA_NULL;
4878  }
4879 }
4880 
4881 void VmaAllocation_T::BlockAllocMap()
4882 {
4883  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4884 
4885  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4886  {
4887  ++m_MapCount;
4888  }
4889  else
4890  {
4891  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4892  }
4893 }
4894 
4895 void VmaAllocation_T::BlockAllocUnmap()
4896 {
4897  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4898 
4899  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4900  {
4901  --m_MapCount;
4902  }
4903  else
4904  {
4905  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4906  }
4907 }
4908 
4909 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4910 {
4911  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4912 
4913  if(m_MapCount != 0)
4914  {
4915  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4916  {
4917  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4918  *ppData = m_DedicatedAllocation.m_pMappedData;
4919  ++m_MapCount;
4920  return VK_SUCCESS;
4921  }
4922  else
4923  {
4924  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4925  return VK_ERROR_MEMORY_MAP_FAILED;
4926  }
4927  }
4928  else
4929  {
4930  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4931  hAllocator->m_hDevice,
4932  m_DedicatedAllocation.m_hMemory,
4933  0, // offset
4934  VK_WHOLE_SIZE,
4935  0, // flags
4936  ppData);
4937  if(result == VK_SUCCESS)
4938  {
4939  m_DedicatedAllocation.m_pMappedData = *ppData;
4940  m_MapCount = 1;
4941  }
4942  return result;
4943  }
4944 }
4945 
4946 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4947 {
4948  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4949 
4950  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4951  {
4952  --m_MapCount;
4953  if(m_MapCount == 0)
4954  {
4955  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4956  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4957  hAllocator->m_hDevice,
4958  m_DedicatedAllocation.m_hMemory);
4959  }
4960  }
4961  else
4962  {
4963  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4964  }
4965 }
4966 
4967 #if VMA_STATS_STRING_ENABLED
4968 
4969 // Names corresponding to values of enum VmaSuballocationType.
4970 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4971  "FREE",
4972  "UNKNOWN",
4973  "BUFFER",
4974  "IMAGE_UNKNOWN",
4975  "IMAGE_LINEAR",
4976  "IMAGE_OPTIMAL",
4977 };
4978 
4979 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4980 {
4981  json.BeginObject();
4982 
4983  json.WriteString("Blocks");
4984  json.WriteNumber(stat.blockCount);
4985 
4986  json.WriteString("Allocations");
4987  json.WriteNumber(stat.allocationCount);
4988 
4989  json.WriteString("UnusedRanges");
4990  json.WriteNumber(stat.unusedRangeCount);
4991 
4992  json.WriteString("UsedBytes");
4993  json.WriteNumber(stat.usedBytes);
4994 
4995  json.WriteString("UnusedBytes");
4996  json.WriteNumber(stat.unusedBytes);
4997 
4998  if(stat.allocationCount > 1)
4999  {
5000  json.WriteString("AllocationSize");
5001  json.BeginObject(true);
5002  json.WriteString("Min");
5003  json.WriteNumber(stat.allocationSizeMin);
5004  json.WriteString("Avg");
5005  json.WriteNumber(stat.allocationSizeAvg);
5006  json.WriteString("Max");
5007  json.WriteNumber(stat.allocationSizeMax);
5008  json.EndObject();
5009  }
5010 
5011  if(stat.unusedRangeCount > 1)
5012  {
5013  json.WriteString("UnusedRangeSize");
5014  json.BeginObject(true);
5015  json.WriteString("Min");
5016  json.WriteNumber(stat.unusedRangeSizeMin);
5017  json.WriteString("Avg");
5018  json.WriteNumber(stat.unusedRangeSizeAvg);
5019  json.WriteString("Max");
5020  json.WriteNumber(stat.unusedRangeSizeMax);
5021  json.EndObject();
5022  }
5023 
5024  json.EndObject();
5025 }
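
// The resulting JSON has roughly this shape (example values only):
//
// {
//  "Blocks": 1,
//  "Allocations": 3,
//  "UnusedRanges": 2,
//  "UsedBytes": 1310720,
//  "UnusedBytes": 786432,
//  "AllocationSize": { "Min": 65536, "Avg": 436906, "Max": 1048576 },
//  "UnusedRangeSize": { "Min": 262144, "Avg": 393216, "Max": 524288 }
// }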
5026 
5027 #endif // #if VMA_STATS_STRING_ENABLED
5028 
5029 struct VmaSuballocationItemSizeLess
5030 {
5031  bool operator()(
5032  const VmaSuballocationList::iterator lhs,
5033  const VmaSuballocationList::iterator rhs) const
5034  {
5035  return lhs->size < rhs->size;
5036  }
5037  bool operator()(
5038  const VmaSuballocationList::iterator lhs,
5039  VkDeviceSize rhsSize) const
5040  {
5041  return lhs->size < rhsSize;
5042  }
5043 };
5044 
5046 // class VmaBlockMetadata
5047 
5048 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5049  m_Size(0),
5050  m_FreeCount(0),
5051  m_SumFreeSize(0),
5052  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5053  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5054 {
5055 }
5056 
5057 VmaBlockMetadata::~VmaBlockMetadata()
5058 {
5059 }
5060 
5061 void VmaBlockMetadata::Init(VkDeviceSize size)
5062 {
5063  m_Size = size;
5064  m_FreeCount = 1;
5065  m_SumFreeSize = size;
5066 
5067  VmaSuballocation suballoc = {};
5068  suballoc.offset = 0;
5069  suballoc.size = size;
5070  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5071  suballoc.hAllocation = VK_NULL_HANDLE;
5072 
5073  m_Suballocations.push_back(suballoc);
5074  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5075  --suballocItem;
5076  m_FreeSuballocationsBySize.push_back(suballocItem);
5077 }
5078 
5079 bool VmaBlockMetadata::Validate() const
5080 {
5081  if(m_Suballocations.empty())
5082  {
5083  return false;
5084  }
5085 
5086  // Expected offset of a new suballocation, as calculated from previous ones.
5087  VkDeviceSize calculatedOffset = 0;
5088  // Expected number of free suballocations as calculated from traversing their list.
5089  uint32_t calculatedFreeCount = 0;
5090  // Expected sum size of free suballocations as calculated from traversing their list.
5091  VkDeviceSize calculatedSumFreeSize = 0;
5092  // Expected number of free suballocations that should be registered in
5093  // m_FreeSuballocationsBySize calculated from traversing their list.
5094  size_t freeSuballocationsToRegister = 0;
5095  // True if the previously visited suballocation was free.
5096  bool prevFree = false;
5097 
5098  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5099  suballocItem != m_Suballocations.cend();
5100  ++suballocItem)
5101  {
5102  const VmaSuballocation& subAlloc = *suballocItem;
5103 
5104  // Actual offset of this suballocation doesn't match the expected one.
5105  if(subAlloc.offset != calculatedOffset)
5106  {
5107  return false;
5108  }
5109 
5110  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5111  // Two adjacent free suballocations are invalid. They should be merged.
5112  if(prevFree && currFree)
5113  {
5114  return false;
5115  }
5116 
5117  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5118  {
5119  return false;
5120  }
5121 
5122  if(currFree)
5123  {
5124  calculatedSumFreeSize += subAlloc.size;
5125  ++calculatedFreeCount;
5126  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5127  {
5128  ++freeSuballocationsToRegister;
5129  }
5130  }
5131  else
5132  {
5133  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5134  {
5135  return false;
5136  }
5137  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5138  {
5139  return false;
5140  }
5141  }
5142 
5143  calculatedOffset += subAlloc.size;
5144  prevFree = currFree;
5145  }
5146 
5147  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5148  // match the expected one.
5149  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5150  {
5151  return false;
5152  }
5153 
5154  VkDeviceSize lastSize = 0;
5155  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5156  {
5157  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5158 
5159  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5160  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5161  {
5162  return false;
5163  }
5164  // They must be sorted by size ascending.
5165  if(suballocItem->size < lastSize)
5166  {
5167  return false;
5168  }
5169 
5170  lastSize = suballocItem->size;
5171  }
5172 
5173  // Check if totals match the calculated values.
5174  if(!ValidateFreeSuballocationList() ||
5175  (calculatedOffset != m_Size) ||
5176  (calculatedSumFreeSize != m_SumFreeSize) ||
5177  (calculatedFreeCount != m_FreeCount))
5178  {
5179  return false;
5180  }
5181 
5182  return true;
5183 }
5184 
5185 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5186 {
5187  if(!m_FreeSuballocationsBySize.empty())
5188  {
5189  return m_FreeSuballocationsBySize.back()->size;
5190  }
5191  else
5192  {
5193  return 0;
5194  }
5195 }
5196 
5197 bool VmaBlockMetadata::IsEmpty() const
5198 {
5199  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5200 }
5201 
5202 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5203 {
5204  outInfo.blockCount = 1;
5205 
5206  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5207  outInfo.allocationCount = rangeCount - m_FreeCount;
5208  outInfo.unusedRangeCount = m_FreeCount;
5209 
5210  outInfo.unusedBytes = m_SumFreeSize;
5211  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5212 
5213  outInfo.allocationSizeMin = UINT64_MAX;
5214  outInfo.allocationSizeMax = 0;
5215  outInfo.unusedRangeSizeMin = UINT64_MAX;
5216  outInfo.unusedRangeSizeMax = 0;
5217 
5218  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5219  suballocItem != m_Suballocations.cend();
5220  ++suballocItem)
5221  {
5222  const VmaSuballocation& suballoc = *suballocItem;
5223  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5224  {
5225  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5226  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5227  }
5228  else
5229  {
5230  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5231  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5232  }
5233  }
5234 }
5235 
5236 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5237 {
5238  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5239 
5240  inoutStats.size += m_Size;
5241  inoutStats.unusedSize += m_SumFreeSize;
5242  inoutStats.allocationCount += rangeCount - m_FreeCount;
5243  inoutStats.unusedRangeCount += m_FreeCount;
5244  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5245 }
5246 
5247 #if VMA_STATS_STRING_ENABLED
5248 
5249 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5250 {
5251  json.BeginObject();
5252 
5253  json.WriteString("TotalBytes");
5254  json.WriteNumber(m_Size);
5255 
5256  json.WriteString("UnusedBytes");
5257  json.WriteNumber(m_SumFreeSize);
5258 
5259  json.WriteString("Allocations");
5260  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5261 
5262  json.WriteString("UnusedRanges");
5263  json.WriteNumber(m_FreeCount);
5264 
5265  json.WriteString("Suballocations");
5266  json.BeginArray();
5267  size_t i = 0;
5268  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5269  suballocItem != m_Suballocations.cend();
5270  ++suballocItem, ++i)
5271  {
5272  json.BeginObject(true);
5273 
5274  json.WriteString("Type");
5275  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5276 
5277  json.WriteString("Size");
5278  json.WriteNumber(suballocItem->size);
5279 
5280  json.WriteString("Offset");
5281  json.WriteNumber(suballocItem->offset);
5282 
5283  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5284  {
5285  const void* pUserData = suballocItem->hAllocation->GetUserData();
5286  if(pUserData != VMA_NULL)
5287  {
5288  json.WriteString("UserData");
5289  if(suballocItem->hAllocation->IsUserDataString())
5290  {
5291  json.WriteString((const char*)pUserData);
5292  }
5293  else
5294  {
5295  json.BeginString();
5296  json.ContinueString_Pointer(pUserData);
5297  json.EndString();
5298  }
5299  }
5300  }
5301 
5302  json.EndObject();
5303  }
5304  json.EndArray();
5305 
5306  json.EndObject();
5307 }
5308 
5309 #endif // #if VMA_STATS_STRING_ENABLED
5310 
5311 /*
5312 How many suitable free suballocations to analyze before choosing the best one.
5313 - Set to 1 to use the First-Fit algorithm - the first suitable free suballocation
5314  will be chosen.
5315 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable free
5316  suballocations will be analyzed and the best one will be chosen.
5317 - Any other value is also acceptable.
5318 */
5319 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5320 
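// Worked example of the best-fit lookup below (illustrative): with registered
// free sizes [64, 128, 512, 2048] in m_FreeSuballocationsBySize and
// allocSize = 200, VmaBinaryFindFirstNotLess() returns the iterator to 512 -
// the smallest free suballocation that can possibly fit. The loop then only
// advances to larger candidates when alignment or bufferImageGranularity
// padding makes a smaller one unusable.
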
5321 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5322 {
5323  VMA_ASSERT(IsEmpty());
5324  pAllocationRequest->offset = 0;
5325  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5326  pAllocationRequest->sumItemSize = 0;
5327  pAllocationRequest->item = m_Suballocations.begin();
5328  pAllocationRequest->itemsToMakeLostCount = 0;
5329 }
5330 
5331 bool VmaBlockMetadata::CreateAllocationRequest(
5332  uint32_t currentFrameIndex,
5333  uint32_t frameInUseCount,
5334  VkDeviceSize bufferImageGranularity,
5335  VkDeviceSize allocSize,
5336  VkDeviceSize allocAlignment,
5337  VmaSuballocationType allocType,
5338  bool canMakeOtherLost,
5339  VmaAllocationRequest* pAllocationRequest)
5340 {
5341  VMA_ASSERT(allocSize > 0);
5342  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5343  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5344  VMA_HEAVY_ASSERT(Validate());
5345 
5346  // There is not enough total free space in this block to fulfill the request: Early return.
5347  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5348  {
5349  return false;
5350  }
5351 
5352  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5353  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5354  if(freeSuballocCount > 0)
5355  {
5356  if(VMA_BEST_FIT)
5357  {
5358  // Find first free suballocation with size not less than allocSize.
5359  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5360  m_FreeSuballocationsBySize.data(),
5361  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5362  allocSize,
5363  VmaSuballocationItemSizeLess());
5364  size_t index = it - m_FreeSuballocationsBySize.data();
5365  for(; index < freeSuballocCount; ++index)
5366  {
5367  if(CheckAllocation(
5368  currentFrameIndex,
5369  frameInUseCount,
5370  bufferImageGranularity,
5371  allocSize,
5372  allocAlignment,
5373  allocType,
5374  m_FreeSuballocationsBySize[index],
5375  false, // canMakeOtherLost
5376  &pAllocationRequest->offset,
5377  &pAllocationRequest->itemsToMakeLostCount,
5378  &pAllocationRequest->sumFreeSize,
5379  &pAllocationRequest->sumItemSize))
5380  {
5381  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5382  return true;
5383  }
5384  }
5385  }
5386  else
5387  {
5388  // Search starting from the biggest suballocations.
5389  for(size_t index = freeSuballocCount; index--; )
5390  {
5391  if(CheckAllocation(
5392  currentFrameIndex,
5393  frameInUseCount,
5394  bufferImageGranularity,
5395  allocSize,
5396  allocAlignment,
5397  allocType,
5398  m_FreeSuballocationsBySize[index],
5399  false, // canMakeOtherLost
5400  &pAllocationRequest->offset,
5401  &pAllocationRequest->itemsToMakeLostCount,
5402  &pAllocationRequest->sumFreeSize,
5403  &pAllocationRequest->sumItemSize))
5404  {
5405  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5406  return true;
5407  }
5408  }
5409  }
5410  }
5411 
5412  if(canMakeOtherLost)
5413  {
5414  // Brute-force algorithm. TODO: Come up with something better.
5415 
5416  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5417  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5418 
5419  VmaAllocationRequest tmpAllocRequest = {};
5420  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5421  suballocIt != m_Suballocations.end();
5422  ++suballocIt)
5423  {
5424  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5425  suballocIt->hAllocation->CanBecomeLost())
5426  {
5427  if(CheckAllocation(
5428  currentFrameIndex,
5429  frameInUseCount,
5430  bufferImageGranularity,
5431  allocSize,
5432  allocAlignment,
5433  allocType,
5434  suballocIt,
5435  canMakeOtherLost,
5436  &tmpAllocRequest.offset,
5437  &tmpAllocRequest.itemsToMakeLostCount,
5438  &tmpAllocRequest.sumFreeSize,
5439  &tmpAllocRequest.sumItemSize))
5440  {
5441  tmpAllocRequest.item = suballocIt;
5442 
5443  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5444  {
5445  *pAllocationRequest = tmpAllocRequest;
5446  }
5447  }
5448  }
5449  }
5450 
5451  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5452  {
5453  return true;
5454  }
5455  }
5456 
5457  return false;
5458 }
5459 
5460 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5461  uint32_t currentFrameIndex,
5462  uint32_t frameInUseCount,
5463  VmaAllocationRequest* pAllocationRequest)
5464 {
5465  while(pAllocationRequest->itemsToMakeLostCount > 0)
5466  {
5467  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5468  {
5469  ++pAllocationRequest->item;
5470  }
5471  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5472  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5473  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5474  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5475  {
5476  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5477  --pAllocationRequest->itemsToMakeLostCount;
5478  }
5479  else
5480  {
5481  return false;
5482  }
5483  }
5484 
5485  VMA_HEAVY_ASSERT(Validate());
5486  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5487  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5488 
5489  return true;
5490 }
5491 
5492 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5493 {
5494  uint32_t lostAllocationCount = 0;
5495  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5496  it != m_Suballocations.end();
5497  ++it)
5498  {
5499  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5500  it->hAllocation->CanBecomeLost() &&
5501  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5502  {
5503  it = FreeSuballocation(it);
5504  ++lostAllocationCount;
5505  }
5506  }
5507  return lostAllocationCount;
5508 }
5509 
5510 void VmaBlockMetadata::Alloc(
5511  const VmaAllocationRequest& request,
5512  VmaSuballocationType type,
5513  VkDeviceSize allocSize,
5514  VmaAllocation hAllocation)
5515 {
5516  VMA_ASSERT(request.item != m_Suballocations.end());
5517  VmaSuballocation& suballoc = *request.item;
5518  // The given suballocation must be a FREE one.
5519  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5520  // The given offset must be inside this suballocation.
5521  VMA_ASSERT(request.offset >= suballoc.offset);
5522  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5523  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5524  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5525 
5526  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5527  // it to become used.
5528  UnregisterFreeSuballocation(request.item);
5529 
5530  suballoc.offset = request.offset;
5531  suballoc.size = allocSize;
5532  suballoc.type = type;
5533  suballoc.hAllocation = hAllocation;
5534 
5535  // If there are any free bytes remaining at the end, insert a new free suballocation after the current one.
5536  if(paddingEnd)
5537  {
5538  VmaSuballocation paddingSuballoc = {};
5539  paddingSuballoc.offset = request.offset + allocSize;
5540  paddingSuballoc.size = paddingEnd;
5541  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5542  VmaSuballocationList::iterator next = request.item;
5543  ++next;
5544  const VmaSuballocationList::iterator paddingEndItem =
5545  m_Suballocations.insert(next, paddingSuballoc);
5546  RegisterFreeSuballocation(paddingEndItem);
5547  }
5548 
5549  // If there are any free bytes remaining at the beginning, insert a new free suballocation before the current one.
5550  if(paddingBegin)
5551  {
5552  VmaSuballocation paddingSuballoc = {};
5553  paddingSuballoc.offset = request.offset - paddingBegin;
5554  paddingSuballoc.size = paddingBegin;
5555  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5556  const VmaSuballocationList::iterator paddingBeginItem =
5557  m_Suballocations.insert(request.item, paddingSuballoc);
5558  RegisterFreeSuballocation(paddingBeginItem);
5559  }
5560 
5561  // Update totals.
5562  m_FreeCount = m_FreeCount - 1;
5563  if(paddingBegin > 0)
5564  {
5565  ++m_FreeCount;
5566  }
5567  if(paddingEnd > 0)
5568  {
5569  ++m_FreeCount;
5570  }
5571  m_SumFreeSize -= allocSize;
5572 }
5573 
5574 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5575 {
5576  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5577  suballocItem != m_Suballocations.end();
5578  ++suballocItem)
5579  {
5580  VmaSuballocation& suballoc = *suballocItem;
5581  if(suballoc.hAllocation == allocation)
5582  {
5583  FreeSuballocation(suballocItem);
5584  VMA_HEAVY_ASSERT(Validate());
5585  return;
5586  }
5587  }
5588  VMA_ASSERT(0 && "Not found!");
5589 }
5590 
5591 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5592 {
5593  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5594  suballocItem != m_Suballocations.end();
5595  ++suballocItem)
5596  {
5597  VmaSuballocation& suballoc = *suballocItem;
5598  if(suballoc.offset == offset)
5599  {
5600  FreeSuballocation(suballocItem);
5601  return;
5602  }
5603  }
5604  VMA_ASSERT(0 && "Not found!");
5605 }
5606 
5607 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5608 {
5609  VkDeviceSize lastSize = 0;
5610  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5611  {
5612  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5613 
5614  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5615  {
5616  VMA_ASSERT(0);
5617  return false;
5618  }
5619  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5620  {
5621  VMA_ASSERT(0);
5622  return false;
5623  }
5624  if(it->size < lastSize)
5625  {
5626  VMA_ASSERT(0);
5627  return false;
5628  }
5629 
5630  lastSize = it->size;
5631  }
5632  return true;
5633 }
5634 
5635 bool VmaBlockMetadata::CheckAllocation(
5636  uint32_t currentFrameIndex,
5637  uint32_t frameInUseCount,
5638  VkDeviceSize bufferImageGranularity,
5639  VkDeviceSize allocSize,
5640  VkDeviceSize allocAlignment,
5641  VmaSuballocationType allocType,
5642  VmaSuballocationList::const_iterator suballocItem,
5643  bool canMakeOtherLost,
5644  VkDeviceSize* pOffset,
5645  size_t* itemsToMakeLostCount,
5646  VkDeviceSize* pSumFreeSize,
5647  VkDeviceSize* pSumItemSize) const
5648 {
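 // Two paths follow. With canMakeOtherLost, the search may span several
 // consecutive suballocations, accumulating free space into *pSumFreeSize and
 // the size of used allocations that could be made lost into *pSumItemSize.
 // Without it, the request must fit entirely inside the single free
 // suballocation at suballocItem, including debug margins and alignment padding.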
5649  VMA_ASSERT(allocSize > 0);
5650  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5651  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5652  VMA_ASSERT(pOffset != VMA_NULL);
5653 
5654  *itemsToMakeLostCount = 0;
5655  *pSumFreeSize = 0;
5656  *pSumItemSize = 0;
5657 
5658  if(canMakeOtherLost)
5659  {
5660  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5661  {
5662  *pSumFreeSize = suballocItem->size;
5663  }
5664  else
5665  {
5666  if(suballocItem->hAllocation->CanBecomeLost() &&
5667  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5668  {
5669  ++*itemsToMakeLostCount;
5670  *pSumItemSize = suballocItem->size;
5671  }
5672  else
5673  {
5674  return false;
5675  }
5676  }
5677 
5678  // Remaining size is too small for this request: Early return.
5679  if(m_Size - suballocItem->offset < allocSize)
5680  {
5681  return false;
5682  }
5683 
5684  // Start from offset equal to beginning of this suballocation.
5685  *pOffset = suballocItem->offset;
5686 
5687  // Apply VMA_DEBUG_MARGIN at the beginning.
5688  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5689  {
5690  *pOffset += VMA_DEBUG_MARGIN;
5691  }
5692 
5693  // Apply alignment.
5694  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5695  *pOffset = VmaAlignUp(*pOffset, alignment);
5696 
5697  // Check previous suballocations for BufferImageGranularity conflicts.
5698  // Make bigger alignment if necessary.
5699  if(bufferImageGranularity > 1)
5700  {
5701  bool bufferImageGranularityConflict = false;
5702  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5703  while(prevSuballocItem != m_Suballocations.cbegin())
5704  {
5705  --prevSuballocItem;
5706  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5707  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5708  {
5709  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5710  {
5711  bufferImageGranularityConflict = true;
5712  break;
5713  }
5714  }
5715  else
5716  // Already on previous page.
5717  break;
5718  }
5719  if(bufferImageGranularityConflict)
5720  {
5721  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5722  }
5723  }
5724 
5725  // Now that we have final *pOffset, check if we are past suballocItem.
5726  // If yes, return false - this function should be called for another suballocItem as starting point.
5727  if(*pOffset >= suballocItem->offset + suballocItem->size)
5728  {
5729  return false;
5730  }
5731 
5732  // Calculate padding at the beginning based on current offset.
5733  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5734 
5735  // Calculate required margin at the end if this is not last suballocation.
5736  VmaSuballocationList::const_iterator next = suballocItem;
5737  ++next;
5738  const VkDeviceSize requiredEndMargin =
5739  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5740 
5741  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5742  // Another early return check.
5743  if(suballocItem->offset + totalSize > m_Size)
5744  {
5745  return false;
5746  }
5747 
5748  // Advance lastSuballocItem until desired size is reached.
5749  // Update itemsToMakeLostCount.
5750  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5751  if(totalSize > suballocItem->size)
5752  {
5753  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5754  while(remainingSize > 0)
5755  {
5756  ++lastSuballocItem;
5757  if(lastSuballocItem == m_Suballocations.cend())
5758  {
5759  return false;
5760  }
5761  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5762  {
5763  *pSumFreeSize += lastSuballocItem->size;
5764  }
5765  else
5766  {
5767  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5768  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5769  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5770  {
5771  ++*itemsToMakeLostCount;
5772  *pSumItemSize += lastSuballocItem->size;
5773  }
5774  else
5775  {
5776  return false;
5777  }
5778  }
5779  remainingSize = (lastSuballocItem->size < remainingSize) ?
5780  remainingSize - lastSuballocItem->size : 0;
5781  }
5782  }
5783 
5784  // Check next suballocations for BufferImageGranularity conflicts.
5785  // If conflict exists, we must mark more allocations lost or fail.
5786  if(bufferImageGranularity > 1)
5787  {
5788  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5789  ++nextSuballocItem;
5790  while(nextSuballocItem != m_Suballocations.cend())
5791  {
5792  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5793  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5794  {
5795  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5796  {
5797  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5798  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5799  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5800  {
5801  ++*itemsToMakeLostCount;
5802  }
5803  else
5804  {
5805  return false;
5806  }
5807  }
5808  }
5809  else
5810  {
5811  // Already on next page.
5812  break;
5813  }
5814  ++nextSuballocItem;
5815  }
5816  }
5817  }
5818  else
5819  {
5820  const VmaSuballocation& suballoc = *suballocItem;
5821  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5822 
5823  *pSumFreeSize = suballoc.size;
5824 
5825  // Size of this suballocation is too small for this request: Early return.
5826  if(suballoc.size < allocSize)
5827  {
5828  return false;
5829  }
5830 
5831  // Start from offset equal to beginning of this suballocation.
5832  *pOffset = suballoc.offset;
5833 
5834  // Apply VMA_DEBUG_MARGIN at the beginning.
5835  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5836  {
5837  *pOffset += VMA_DEBUG_MARGIN;
5838  }
5839 
5840  // Apply alignment.
5841  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5842  *pOffset = VmaAlignUp(*pOffset, alignment);
5843 
5844  // Check previous suballocations for BufferImageGranularity conflicts.
5845  // Make bigger alignment if necessary.
5846  if(bufferImageGranularity > 1)
5847  {
5848  bool bufferImageGranularityConflict = false;
5849  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5850  while(prevSuballocItem != m_Suballocations.cbegin())
5851  {
5852  --prevSuballocItem;
5853  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5854  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5855  {
5856  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5857  {
5858  bufferImageGranularityConflict = true;
5859  break;
5860  }
5861  }
5862  else
5863  // Already on previous page.
5864  break;
5865  }
5866  if(bufferImageGranularityConflict)
5867  {
5868  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5869  }
5870  }
5871 
5872  // Calculate padding at the beginning based on current offset.
5873  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5874 
5875  // Calculate required margin at the end if this is not last suballocation.
5876  VmaSuballocationList::const_iterator next = suballocItem;
5877  ++next;
5878  const VkDeviceSize requiredEndMargin =
5879  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5880 
5881  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5882  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5883  {
5884  return false;
5885  }
5886 
5887  // Check next suballocations for BufferImageGranularity conflicts.
5888  // If conflict exists, allocation cannot be made here.
5889  if(bufferImageGranularity > 1)
5890  {
5891  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5892  ++nextSuballocItem;
5893  while(nextSuballocItem != m_Suballocations.cend())
5894  {
5895  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5896  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5897  {
5898  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5899  {
5900  return false;
5901  }
5902  }
5903  else
5904  {
5905  // Already on next page.
5906  break;
5907  }
5908  ++nextSuballocItem;
5909  }
5910  }
5911  }
5912 
5913  // All tests passed: Success. pOffset is already filled.
5914  return true;
5915 }
5916 
5917 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5918 {
5919  VMA_ASSERT(item != m_Suballocations.end());
5920  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5921 
5922  VmaSuballocationList::iterator nextItem = item;
5923  ++nextItem;
5924  VMA_ASSERT(nextItem != m_Suballocations.end());
5925  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5926 
5927  item->size += nextItem->size;
5928  --m_FreeCount;
5929  m_Suballocations.erase(nextItem);
5930 }
5931 
5932 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5933 {
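 // Returns an iterator to the resulting free suballocation: either this item,
 // or the previous item when the freed range is merged with a preceding free
 // neighbor.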
5934  // Change this suballocation to be marked as free.
5935  VmaSuballocation& suballoc = *suballocItem;
5936  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5937  suballoc.hAllocation = VK_NULL_HANDLE;
5938 
5939  // Update totals.
5940  ++m_FreeCount;
5941  m_SumFreeSize += suballoc.size;
5942 
5943  // Merge with previous and/or next suballocation if it's also free.
5944  bool mergeWithNext = false;
5945  bool mergeWithPrev = false;
5946 
5947  VmaSuballocationList::iterator nextItem = suballocItem;
5948  ++nextItem;
5949  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5950  {
5951  mergeWithNext = true;
5952  }
5953 
5954  VmaSuballocationList::iterator prevItem = suballocItem;
5955  if(suballocItem != m_Suballocations.begin())
5956  {
5957  --prevItem;
5958  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5959  {
5960  mergeWithPrev = true;
5961  }
5962  }
5963 
5964  if(mergeWithNext)
5965  {
5966  UnregisterFreeSuballocation(nextItem);
5967  MergeFreeWithNext(suballocItem);
5968  }
5969 
5970  if(mergeWithPrev)
5971  {
5972  UnregisterFreeSuballocation(prevItem);
5973  MergeFreeWithNext(prevItem);
5974  RegisterFreeSuballocation(prevItem);
5975  return prevItem;
5976  }
5977  else
5978  {
5979  RegisterFreeSuballocation(suballocItem);
5980  return suballocItem;
5981  }
5982 }
5983 
5984 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5985 {
5986  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5987  VMA_ASSERT(item->size > 0);
5988 
5989  // You may want to enable this validation at the beginning or at the end of
5990  // this function, depending on what you want to check.
5991  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5992 
5993  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5994  {
5995  if(m_FreeSuballocationsBySize.empty())
5996  {
5997  m_FreeSuballocationsBySize.push_back(item);
5998  }
5999  else
6000  {
6001  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6002  }
6003  }
6004 
6005  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6006 }
6007 
6008 
6009 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6010 {
6011  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6012  VMA_ASSERT(item->size > 0);
6013 
6014  // You may want to enable this validation at the beginning or at the end of
6015  // this function, depending on what you want to check.
6016  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6017 
6018  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6019  {
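 // Binary search finds the first entry whose size is not less than the item's;
 // entries of equal size are then scanned linearly to locate the exact iterator.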
6020  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6021  m_FreeSuballocationsBySize.data(),
6022  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6023  item,
6024  VmaSuballocationItemSizeLess());
6025  for(size_t index = it - m_FreeSuballocationsBySize.data();
6026  index < m_FreeSuballocationsBySize.size();
6027  ++index)
6028  {
6029  if(m_FreeSuballocationsBySize[index] == item)
6030  {
6031  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6032  return;
6033  }
6034  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6035  }
6036  VMA_ASSERT(0 && "Not found.");
6037  }
6038 
6039  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6040 }
6041 
6042 ////////////////////////////////////////////////////////////////////////////////
6043 // class VmaDeviceMemoryMapping
6044 
6045 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
6046  m_MapCount(0),
6047  m_pMappedData(VMA_NULL)
6048 {
6049 }
6050 
6051 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
6052 {
6053  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6054 }
6055 
6056 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
6057 {
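 // The mapping is reference-counted: only the transition from an unmapped
 // state calls vkMapMemory; subsequent calls just increase m_MapCount and
 // return the pointer that is already mapped.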
6058  if(count == 0)
6059  {
6060  return VK_SUCCESS;
6061  }
6062 
6063  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6064  if(m_MapCount != 0)
6065  {
6066  m_MapCount += count;
6067  VMA_ASSERT(m_pMappedData != VMA_NULL);
6068  if(ppData != VMA_NULL)
6069  {
6070  *ppData = m_pMappedData;
6071  }
6072  return VK_SUCCESS;
6073  }
6074  else
6075  {
6076  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6077  hAllocator->m_hDevice,
6078  hMemory,
6079  0, // offset
6080  VK_WHOLE_SIZE,
6081  0, // flags
6082  &m_pMappedData);
6083  if(result == VK_SUCCESS)
6084  {
6085  if(ppData != VMA_NULL)
6086  {
6087  *ppData = m_pMappedData;
6088  }
6089  m_MapCount = count;
6090  }
6091  return result;
6092  }
6093 }
6094 
6095 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6096 {
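 // Symmetric to Map(): vkUnmapMemory is called only when the reference count
 // drops back to zero.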
6097  if(count == 0)
6098  {
6099  return;
6100  }
6101 
6102  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6103  if(m_MapCount >= count)
6104  {
6105  m_MapCount -= count;
6106  if(m_MapCount == 0)
6107  {
6108  m_pMappedData = VMA_NULL;
6109  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6110  }
6111  }
6112  else
6113  {
6114  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6115  }
6116 }
6117 
6118 ////////////////////////////////////////////////////////////////////////////////
6119 // class VmaDeviceMemoryBlock
6120 
6121 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6122  m_MemoryTypeIndex(UINT32_MAX),
6123  m_hMemory(VK_NULL_HANDLE),
6124  m_Metadata(hAllocator)
6125 {
6126 }
6127 
6128 void VmaDeviceMemoryBlock::Init(
6129  uint32_t newMemoryTypeIndex,
6130  VkDeviceMemory newMemory,
6131  VkDeviceSize newSize)
6132 {
6133  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6134 
6135  m_MemoryTypeIndex = newMemoryTypeIndex;
6136  m_hMemory = newMemory;
6137 
6138  m_Metadata.Init(newSize);
6139 }
6140 
6141 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6142 {
6143  // This is the most important assert in the entire library.
6144  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6145  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6146 
6147  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6148  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6149  m_hMemory = VK_NULL_HANDLE;
6150 }
6151 
6152 bool VmaDeviceMemoryBlock::Validate() const
6153 {
6154  if((m_hMemory == VK_NULL_HANDLE) ||
6155  (m_Metadata.GetSize() == 0))
6156  {
6157  return false;
6158  }
6159 
6160  return m_Metadata.Validate();
6161 }
6162 
6163 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6164 {
6165  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6166 }
6167 
6168 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6169 {
6170  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6171 }
6172 
6173 static void InitStatInfo(VmaStatInfo& outInfo)
6174 {
6175  memset(&outInfo, 0, sizeof(outInfo));
6176  outInfo.allocationSizeMin = UINT64_MAX;
6177  outInfo.unusedRangeSizeMin = UINT64_MAX;
6178 }
6179 
6180 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6181 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6182 {
6183  inoutInfo.blockCount += srcInfo.blockCount;
6184  inoutInfo.allocationCount += srcInfo.allocationCount;
6185  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6186  inoutInfo.usedBytes += srcInfo.usedBytes;
6187  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6188  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6189  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6190  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6191  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6192 }
6193 
6194 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6195 {
6196  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6197  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6198  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6199  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6200 }
6201 
6202 VmaPool_T::VmaPool_T(
6203  VmaAllocator hAllocator,
6204  const VmaPoolCreateInfo& createInfo) :
6205  m_BlockVector(
6206  hAllocator,
6207  createInfo.memoryTypeIndex,
6208  createInfo.blockSize,
6209  createInfo.minBlockCount,
6210  createInfo.maxBlockCount,
6211  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6212  createInfo.frameInUseCount,
6213  true) // isCustomPool
6214 {
6215 }
6216 
6217 VmaPool_T::~VmaPool_T()
6218 {
6219 }
6220 
6221 #if VMA_STATS_STRING_ENABLED
6222 
6223 #endif // #if VMA_STATS_STRING_ENABLED
6224 
6225 VmaBlockVector::VmaBlockVector(
6226  VmaAllocator hAllocator,
6227  uint32_t memoryTypeIndex,
6228  VkDeviceSize preferredBlockSize,
6229  size_t minBlockCount,
6230  size_t maxBlockCount,
6231  VkDeviceSize bufferImageGranularity,
6232  uint32_t frameInUseCount,
6233  bool isCustomPool) :
6234  m_hAllocator(hAllocator),
6235  m_MemoryTypeIndex(memoryTypeIndex),
6236  m_PreferredBlockSize(preferredBlockSize),
6237  m_MinBlockCount(minBlockCount),
6238  m_MaxBlockCount(maxBlockCount),
6239  m_BufferImageGranularity(bufferImageGranularity),
6240  m_FrameInUseCount(frameInUseCount),
6241  m_IsCustomPool(isCustomPool),
6242  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6243  m_HasEmptyBlock(false),
6244  m_pDefragmentator(VMA_NULL)
6245 {
6246 }
6247 
6248 VmaBlockVector::~VmaBlockVector()
6249 {
6250  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6251 
6252  for(size_t i = m_Blocks.size(); i--; )
6253  {
6254  m_Blocks[i]->Destroy(m_hAllocator);
6255  vma_delete(m_hAllocator, m_Blocks[i]);
6256  }
6257 }
6258 
6259 VkResult VmaBlockVector::CreateMinBlocks()
6260 {
6261  for(size_t i = 0; i < m_MinBlockCount; ++i)
6262  {
6263  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6264  if(res != VK_SUCCESS)
6265  {
6266  return res;
6267  }
6268  }
6269  return VK_SUCCESS;
6270 }
6271 
6272 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6273 {
6274  pStats->size = 0;
6275  pStats->unusedSize = 0;
6276  pStats->allocationCount = 0;
6277  pStats->unusedRangeCount = 0;
6278  pStats->unusedRangeSizeMax = 0;
6279 
6280  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6281 
6282  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6283  {
6284  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6285  VMA_ASSERT(pBlock);
6286  VMA_HEAVY_ASSERT(pBlock->Validate());
6287  pBlock->m_Metadata.AddPoolStats(*pStats);
6288  }
6289 }
6290 
6291 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6292 
6293 VkResult VmaBlockVector::Allocate(
6294  VmaPool hCurrentPool,
6295  uint32_t currentFrameIndex,
6296  const VkMemoryRequirements& vkMemReq,
6297  const VmaAllocationCreateInfo& createInfo,
6298  VmaSuballocationType suballocType,
6299  VmaAllocation* pAllocation)
6300 {
6301  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6302  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6303 
6304  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6305 
6306  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6307  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6308  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6309  {
6310  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6311  VMA_ASSERT(pCurrBlock);
6312  VmaAllocationRequest currRequest = {};
6313  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6314  currentFrameIndex,
6315  m_FrameInUseCount,
6316  m_BufferImageGranularity,
6317  vkMemReq.size,
6318  vkMemReq.alignment,
6319  suballocType,
6320  false, // canMakeOtherLost
6321  &currRequest))
6322  {
6323  // Allocate from pCurrBlock.
6324  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6325 
6326  if(mapped)
6327  {
6328  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6329  if(res != VK_SUCCESS)
6330  {
6331  return res;
6332  }
6333  }
6334 
6335  // We no longer have an empty Allocation.
6336  if(pCurrBlock->m_Metadata.IsEmpty())
6337  {
6338  m_HasEmptyBlock = false;
6339  }
6340 
6341  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6342  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6343  (*pAllocation)->InitBlockAllocation(
6344  hCurrentPool,
6345  pCurrBlock,
6346  currRequest.offset,
6347  vkMemReq.alignment,
6348  vkMemReq.size,
6349  suballocType,
6350  mapped,
6351  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6352  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6353  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6354  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6355  return VK_SUCCESS;
6356  }
6357  }
6358 
6359  const bool canCreateNewBlock =
6360  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6361  (m_Blocks.size() < m_MaxBlockCount);
6362 
6363  // 2. Try to create new block.
6364  if(canCreateNewBlock)
6365  {
6366  // Calculate optimal size for new block.
6367  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6368  uint32_t newBlockSizeShift = 0;
6369  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6370 
6371  // Allocating blocks of other sizes is allowed only in default pools.
6372  // In custom pools block size is fixed.
6373  if(m_IsCustomPool == false)
6374  {
6375  // Allocate 1/8, 1/4, 1/2 as first blocks.
6376  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6377  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6378  {
6379  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6380  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6381  {
6382  newBlockSize = smallerNewBlockSize;
6383  ++newBlockSizeShift;
6384  }
6385  else
6386  {
6387  break;
6388  }
6389  }
6390  }
6391 
6392  size_t newBlockIndex = 0;
6393  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6394  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6395  if(m_IsCustomPool == false)
6396  {
6397  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6398  {
6399  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6400  if(smallerNewBlockSize >= vkMemReq.size)
6401  {
6402  newBlockSize = smallerNewBlockSize;
6403  ++newBlockSizeShift;
6404  res = CreateBlock(newBlockSize, &newBlockIndex);
6405  }
6406  else
6407  {
6408  break;
6409  }
6410  }
6411  }
6412 
6413  if(res == VK_SUCCESS)
6414  {
6415  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6416  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6417 
6418  if(mapped)
6419  {
6420  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6421  if(res != VK_SUCCESS)
6422  {
6423  return res;
6424  }
6425  }
6426 
6427  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6428  VmaAllocationRequest allocRequest;
6429  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6430  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6431  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6432  (*pAllocation)->InitBlockAllocation(
6433  hCurrentPool,
6434  pBlock,
6435  allocRequest.offset,
6436  vkMemReq.alignment,
6437  vkMemReq.size,
6438  suballocType,
6439  mapped,
6440  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6441  VMA_HEAVY_ASSERT(pBlock->Validate());
6442  VMA_DEBUG_LOG(" Created new allocation Size=%llu", newBlockSize);
6443  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6444  return VK_SUCCESS;
6445  }
6446  }
6447 
6448  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6449 
6450  // 3. Try to allocate from existing blocks with making other allocations lost.
6451  if(canMakeOtherLost)
6452  {
6453  uint32_t tryIndex = 0;
6454  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6455  {
6456  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6457  VmaAllocationRequest bestRequest = {};
6458  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6459 
6460  // 1. Search existing allocations.
6461  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6462  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6463  {
6464  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6465  VMA_ASSERT(pCurrBlock);
6466  VmaAllocationRequest currRequest = {};
6467  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6468  currentFrameIndex,
6469  m_FrameInUseCount,
6470  m_BufferImageGranularity,
6471  vkMemReq.size,
6472  vkMemReq.alignment,
6473  suballocType,
6474  canMakeOtherLost,
6475  &currRequest))
6476  {
6477  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6478  if(pBestRequestBlock == VMA_NULL ||
6479  currRequestCost < bestRequestCost)
6480  {
6481  pBestRequestBlock = pCurrBlock;
6482  bestRequest = currRequest;
6483  bestRequestCost = currRequestCost;
6484 
6485  if(bestRequestCost == 0)
6486  {
6487  break;
6488  }
6489  }
6490  }
6491  }
6492 
6493  if(pBestRequestBlock != VMA_NULL)
6494  {
6495  if(mapped)
6496  {
6497  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6498  if(res != VK_SUCCESS)
6499  {
6500  return res;
6501  }
6502  }
6503 
6504  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6505  currentFrameIndex,
6506  m_FrameInUseCount,
6507  &bestRequest))
6508  {
6509  // We no longer have an empty Allocation.
6510  if(pBestRequestBlock->m_Metadata.IsEmpty())
6511  {
6512  m_HasEmptyBlock = false;
6513  }
6514  // Allocate from this pBlock.
6515  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6516  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6517  (*pAllocation)->InitBlockAllocation(
6518  hCurrentPool,
6519  pBestRequestBlock,
6520  bestRequest.offset,
6521  vkMemReq.alignment,
6522  vkMemReq.size,
6523  suballocType,
6524  mapped,
6525  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6526  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6527  VMA_DEBUG_LOG(" Returned from existing block");
6528  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6529  return VK_SUCCESS;
6530  }
6531  // else: Some allocations must have been touched while we are here. Next try.
6532  }
6533  else
6534  {
6535  // Could not find place in any of the blocks - break outer loop.
6536  break;
6537  }
6538  }
6539  /* Maximum number of tries exceeded - a very unlikely event that can happen
6540  when many other threads are simultaneously touching allocations, making it
6541  impossible to make them lost while we try to allocate. */
6542  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6543  {
6544  return VK_ERROR_TOO_MANY_OBJECTS;
6545  }
6546  }
6547 
6548  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6549 }
6550 
6551 void VmaBlockVector::Free(
6552  VmaAllocation hAllocation)
6553 {
6554  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6555 
6556  // Scope for lock.
6557  {
6558  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6559 
6560  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6561 
6562  if(hAllocation->IsPersistentMap())
6563  {
6564  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6565  }
6566 
6567  pBlock->m_Metadata.Free(hAllocation);
6568  VMA_HEAVY_ASSERT(pBlock->Validate());
6569 
6570  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6571 
6572  // pBlock became empty after this deallocation.
6573  if(pBlock->m_Metadata.IsEmpty())
6574  {
6575  // Already has empty Allocation. We don't want to have two, so delete this one.
6576  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6577  {
6578  pBlockToDelete = pBlock;
6579  Remove(pBlock);
6580  }
6581  // We now have first empty Allocation.
6582  else
6583  {
6584  m_HasEmptyBlock = true;
6585  }
6586  }
6587  // pBlock didn't become empty, but we have another empty block - find and free that one.
6588  // (This is optional, a heuristic.)
6589  else if(m_HasEmptyBlock)
6590  {
6591  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6592  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6593  {
6594  pBlockToDelete = pLastBlock;
6595  m_Blocks.pop_back();
6596  m_HasEmptyBlock = false;
6597  }
6598  }
6599 
6600  IncrementallySortBlocks();
6601  }
6602 
6603  // Destruction of a free Allocation. Deferred until this point, outside of mutex
6604  // lock, for performance reasons.
6605  if(pBlockToDelete != VMA_NULL)
6606  {
6607  VMA_DEBUG_LOG(" Deleted empty allocation");
6608  pBlockToDelete->Destroy(m_hAllocator);
6609  vma_delete(m_hAllocator, pBlockToDelete);
6610  }
6611 }
6612 
6613 size_t VmaBlockVector::CalcMaxBlockSize() const
6614 {
6615  size_t result = 0;
6616  for(size_t i = m_Blocks.size(); i--; )
6617  {
6618  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6619  if(result >= m_PreferredBlockSize)
6620  {
6621  break;
6622  }
6623  }
6624  return result;
6625 }
6626 
6627 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6628 {
6629  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6630  {
6631  if(m_Blocks[blockIndex] == pBlock)
6632  {
6633  VmaVectorRemove(m_Blocks, blockIndex);
6634  return;
6635  }
6636  }
6637  VMA_ASSERT(0);
6638 }
6639 
6640 void VmaBlockVector::IncrementallySortBlocks()
6641 {
6642  // Bubble sort only until first swap.
6643  for(size_t i = 1; i < m_Blocks.size(); ++i)
6644  {
6645  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6646  {
6647  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6648  return;
6649  }
6650  }
6651 }
6652 
6653 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6654 {
6655  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6656  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6657  allocInfo.allocationSize = blockSize;
6658  VkDeviceMemory mem = VK_NULL_HANDLE;
6659  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6660  if(res < 0)
6661  {
6662  return res;
6663  }
6664 
6665  // New VkDeviceMemory successfully created.
6666 
6667  // Create new Allocation for it.
6668  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6669  pBlock->Init(
6670  m_MemoryTypeIndex,
6671  mem,
6672  allocInfo.allocationSize);
6673 
6674  m_Blocks.push_back(pBlock);
6675  if(pNewBlockIndex != VMA_NULL)
6676  {
6677  *pNewBlockIndex = m_Blocks.size() - 1;
6678  }
6679 
6680  return VK_SUCCESS;
6681 }
6682 
6683 #if VMA_STATS_STRING_ENABLED
6684 
6685 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6686 {
6687  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6688 
6689  json.BeginObject();
6690 
6691  if(m_IsCustomPool)
6692  {
6693  json.WriteString("MemoryTypeIndex");
6694  json.WriteNumber(m_MemoryTypeIndex);
6695 
6696  json.WriteString("BlockSize");
6697  json.WriteNumber(m_PreferredBlockSize);
6698 
6699  json.WriteString("BlockCount");
6700  json.BeginObject(true);
6701  if(m_MinBlockCount > 0)
6702  {
6703  json.WriteString("Min");
6704  json.WriteNumber((uint64_t)m_MinBlockCount);
6705  }
6706  if(m_MaxBlockCount < SIZE_MAX)
6707  {
6708  json.WriteString("Max");
6709  json.WriteNumber((uint64_t)m_MaxBlockCount);
6710  }
6711  json.WriteString("Cur");
6712  json.WriteNumber((uint64_t)m_Blocks.size());
6713  json.EndObject();
6714 
6715  if(m_FrameInUseCount > 0)
6716  {
6717  json.WriteString("FrameInUseCount");
6718  json.WriteNumber(m_FrameInUseCount);
6719  }
6720  }
6721  else
6722  {
6723  json.WriteString("PreferredBlockSize");
6724  json.WriteNumber(m_PreferredBlockSize);
6725  }
6726 
6727  json.WriteString("Blocks");
6728  json.BeginArray();
6729  for(size_t i = 0; i < m_Blocks.size(); ++i)
6730  {
6731  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6732  }
6733  json.EndArray();
6734 
6735  json.EndObject();
6736 }
6737 
6738 #endif // #if VMA_STATS_STRING_ENABLED
6739 
6740 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6741  VmaAllocator hAllocator,
6742  uint32_t currentFrameIndex)
6743 {
6744  if(m_pDefragmentator == VMA_NULL)
6745  {
6746  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6747  hAllocator,
6748  this,
6749  currentFrameIndex);
6750  }
6751 
6752  return m_pDefragmentator;
6753 }
6754 
6755 VkResult VmaBlockVector::Defragment(
6756  VmaDefragmentationStats* pDefragmentationStats,
6757  VkDeviceSize& maxBytesToMove,
6758  uint32_t& maxAllocationsToMove)
6759 {
6760  if(m_pDefragmentator == VMA_NULL)
6761  {
6762  return VK_SUCCESS;
6763  }
6764 
6765  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6766 
6767  // Defragment.
6768  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6769 
6770  // Accumulate statistics.
6771  if(pDefragmentationStats != VMA_NULL)
6772  {
6773  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6774  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6775  pDefragmentationStats->bytesMoved += bytesMoved;
6776  pDefragmentationStats->allocationsMoved += allocationsMoved;
6777  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6778  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6779  maxBytesToMove -= bytesMoved;
6780  maxAllocationsToMove -= allocationsMoved;
6781  }
6782 
6783  // Free empty blocks.
6784  m_HasEmptyBlock = false;
6785  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6786  {
6787  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6788  if(pBlock->m_Metadata.IsEmpty())
6789  {
6790  if(m_Blocks.size() > m_MinBlockCount)
6791  {
6792  if(pDefragmentationStats != VMA_NULL)
6793  {
6794  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6795  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6796  }
6797 
6798  VmaVectorRemove(m_Blocks, blockIndex);
6799  pBlock->Destroy(m_hAllocator);
6800  vma_delete(m_hAllocator, pBlock);
6801  }
6802  else
6803  {
6804  m_HasEmptyBlock = true;
6805  }
6806  }
6807  }
6808 
6809  return result;
6810 }
6811 
6812 void VmaBlockVector::DestroyDefragmentator()
6813 {
6814  if(m_pDefragmentator != VMA_NULL)
6815  {
6816  vma_delete(m_hAllocator, m_pDefragmentator);
6817  m_pDefragmentator = VMA_NULL;
6818  }
6819 }
6820 
6821 void VmaBlockVector::MakePoolAllocationsLost(
6822  uint32_t currentFrameIndex,
6823  size_t* pLostAllocationCount)
6824 {
6825  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6826  size_t lostAllocationCount = 0;
6827  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6828  {
6829  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6830  VMA_ASSERT(pBlock);
6831  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6832  }
6833  if(pLostAllocationCount != VMA_NULL)
6834  {
6835  *pLostAllocationCount = lostAllocationCount;
6836  }
6837 }
6838 
6839 void VmaBlockVector::AddStats(VmaStats* pStats)
6840 {
6841  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6842  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6843 
6844  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6845 
6846  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6847  {
6848  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6849  VMA_ASSERT(pBlock);
6850  VMA_HEAVY_ASSERT(pBlock->Validate());
6851  VmaStatInfo allocationStatInfo;
6852  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6853  VmaAddStatInfo(pStats->total, allocationStatInfo);
6854  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6855  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6856  }
6857 }
6858 
6859 ////////////////////////////////////////////////////////////////////////////////
6860 // VmaDefragmentator members definition
6861 
6862 VmaDefragmentator::VmaDefragmentator(
6863  VmaAllocator hAllocator,
6864  VmaBlockVector* pBlockVector,
6865  uint32_t currentFrameIndex) :
6866  m_hAllocator(hAllocator),
6867  m_pBlockVector(pBlockVector),
6868  m_CurrentFrameIndex(currentFrameIndex),
6869  m_BytesMoved(0),
6870  m_AllocationsMoved(0),
6871  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6872  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6873 {
6874 }
6875 
6876 VmaDefragmentator::~VmaDefragmentator()
6877 {
6878  for(size_t i = m_Blocks.size(); i--; )
6879  {
6880  vma_delete(m_hAllocator, m_Blocks[i]);
6881  }
6882 }
6883 
6884 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6885 {
6886  AllocationInfo allocInfo;
6887  allocInfo.m_hAllocation = hAlloc;
6888  allocInfo.m_pChanged = pChanged;
6889  m_Allocations.push_back(allocInfo);
6890 }
6891 
6892 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6893 {
6894  // It has already been mapped for defragmentation.
6895  if(m_pMappedDataForDefragmentation)
6896  {
6897  *ppMappedData = m_pMappedDataForDefragmentation;
6898  return VK_SUCCESS;
6899  }
6900 
6901  // It is originally mapped.
6902  if(m_pBlock->m_Mapping.GetMappedData())
6903  {
6904  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6905  return VK_SUCCESS;
6906  }
6907 
6908  // Map on first usage.
6909  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6910  *ppMappedData = m_pMappedDataForDefragmentation;
6911  return res;
6912 }
6913 
6914 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6915 {
6916  if(m_pMappedDataForDefragmentation != VMA_NULL)
6917  {
6918  m_pBlock->Unmap(hAllocator, 1);
6919  }
6920 }
6921 
6922 VkResult VmaDefragmentator::DefragmentRound(
6923  VkDeviceSize maxBytesToMove,
6924  uint32_t maxAllocationsToMove)
6925 {
6926  if(m_Blocks.empty())
6927  {
6928  return VK_SUCCESS;
6929  }
6930 
6931  size_t srcBlockIndex = m_Blocks.size() - 1;
6932  size_t srcAllocIndex = SIZE_MAX;
6933  for(;;)
6934  {
6935  // 1. Find next allocation to move.
6936  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6937  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6938  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6939  {
6940  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6941  {
6942  // Finished: no more allocations to process.
6943  if(srcBlockIndex == 0)
6944  {
6945  return VK_SUCCESS;
6946  }
6947  else
6948  {
6949  --srcBlockIndex;
6950  srcAllocIndex = SIZE_MAX;
6951  }
6952  }
6953  else
6954  {
6955  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6956  }
6957  }
6958 
6959  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6960  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6961 
6962  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6963  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6964  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6965  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6966 
6967  // 2. Try to find new place for this allocation in preceding or current block.
6968  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6969  {
6970  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6971  VmaAllocationRequest dstAllocRequest;
6972  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6973  m_CurrentFrameIndex,
6974  m_pBlockVector->GetFrameInUseCount(),
6975  m_pBlockVector->GetBufferImageGranularity(),
6976  size,
6977  alignment,
6978  suballocType,
6979  false, // canMakeOtherLost
6980  &dstAllocRequest) &&
6981  MoveMakesSense(
6982  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6983  {
6984  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6985 
6986  // Reached limit on number of allocations or bytes to move.
6987  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6988  (m_BytesMoved + size > maxBytesToMove))
6989  {
6990  return VK_INCOMPLETE;
6991  }
6992 
6993  void* pDstMappedData = VMA_NULL;
6994  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6995  if(res != VK_SUCCESS)
6996  {
6997  return res;
6998  }
6999 
7000  void* pSrcMappedData = VMA_NULL;
7001  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7002  if(res != VK_SUCCESS)
7003  {
7004  return res;
7005  }
7006 
7007  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7008  memcpy(
7009  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7010  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7011  static_cast<size_t>(size));
7012 
7013  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7014  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7015 
7016  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7017 
7018  if(allocInfo.m_pChanged != VMA_NULL)
7019  {
7020  *allocInfo.m_pChanged = VK_TRUE;
7021  }
7022 
7023  ++m_AllocationsMoved;
7024  m_BytesMoved += size;
7025 
7026  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7027 
7028  break;
7029  }
7030  }
7031 
7032  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for next round.
7033 
7034  if(srcAllocIndex > 0)
7035  {
7036  --srcAllocIndex;
7037  }
7038  else
7039  {
7040  if(srcBlockIndex > 0)
7041  {
7042  --srcBlockIndex;
7043  srcAllocIndex = SIZE_MAX;
7044  }
7045  else
7046  {
7047  return VK_SUCCESS;
7048  }
7049  }
7050  }
7051 }
7052 
7053 VkResult VmaDefragmentator::Defragment(
7054  VkDeviceSize maxBytesToMove,
7055  uint32_t maxAllocationsToMove)
7056 {
7057  if(m_Allocations.empty())
7058  {
7059  return VK_SUCCESS;
7060  }
7061 
7062  // Create block info for each block.
7063  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7064  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7065  {
7066  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7067  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7068  m_Blocks.push_back(pBlockInfo);
7069  }
7070 
7071  // Sort them by m_pBlock pointer value.
7072  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7073 
7074  // Move allocation infos from m_Allocations into the m_Allocations of their matching entries in m_Blocks.
7075  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7076  {
7077  AllocationInfo& allocInfo = m_Allocations[blockIndex];
7078  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
7079  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7080  {
7081  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7082  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7083  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7084  {
7085  (*it)->m_Allocations.push_back(allocInfo);
7086  }
7087  else
7088  {
7089  VMA_ASSERT(0);
7090  }
7091  }
7092  }
7093  m_Allocations.clear();
7094 
7095  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7096  {
7097  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7098  pBlockInfo->CalcHasNonMovableAllocations();
7099  pBlockInfo->SortAllocationsBySizeDescecnding();
7100  }
7101 
7102  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7103  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7104 
7105  // Execute defragmentation rounds (the main part).
7106  VkResult result = VK_SUCCESS;
7107  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7108  {
7109  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7110  }
7111 
7112  // Unmap blocks that were mapped for defragmentation.
7113  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7114  {
7115  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7116  }
7117 
7118  return result;
7119 }
7120 
7121 bool VmaDefragmentator::MoveMakesSense(
7122  size_t dstBlockIndex, VkDeviceSize dstOffset,
7123  size_t srcBlockIndex, VkDeviceSize srcOffset)
7124 {
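 // A move is worthwhile only if it compacts data toward the front of the
 // block vector: lexicographic comparison of (blockIndex, offset) between
 // destination and source.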
7125  if(dstBlockIndex < srcBlockIndex)
7126  {
7127  return true;
7128  }
7129  if(dstBlockIndex > srcBlockIndex)
7130  {
7131  return false;
7132  }
7133  if(dstOffset < srcOffset)
7134  {
7135  return true;
7136  }
7137  return false;
7138 }
7139 
7140 ////////////////////////////////////////////////////////////////////////////////
7141 // VmaAllocator_T
7142 
7143 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7144  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7145  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7146  m_hDevice(pCreateInfo->device),
7147  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7148  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7149  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7150  m_PreferredLargeHeapBlockSize(0),
7151  m_PhysicalDevice(pCreateInfo->physicalDevice),
7152  m_CurrentFrameIndex(0),
7153  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7154 {
7155  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7156 
7157  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
7158  memset(&m_MemProps, 0, sizeof(m_MemProps));
7159  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7160 
7161  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7162  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7163 
7164  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7165  {
7166  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7167  }
7168 
7169  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7170  {
7171  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7172  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7173  }
7174 
7175  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7176 
7177  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7178  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7179 
7180  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7181  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7182 
7183  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7184  {
7185  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7186  {
7187  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7188  if(limit != VK_WHOLE_SIZE)
7189  {
7190  m_HeapSizeLimit[heapIndex] = limit;
7191  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7192  {
7193  m_MemProps.memoryHeaps[heapIndex].size = limit;
7194  }
7195  }
7196  }
7197  }
7198 
7199  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7200  {
7201  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7202 
7203  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7204  this,
7205  memTypeIndex,
7206  preferredBlockSize,
7207  0,
7208  SIZE_MAX,
7209  GetBufferImageGranularity(),
7210  pCreateInfo->frameInUseCount,
7211  false); // isCustomPool
7212  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7213  // because minBlockCount is 0.
7214  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7215  }
7216 }
7217 
7218 VmaAllocator_T::~VmaAllocator_T()
7219 {
7220  VMA_ASSERT(m_Pools.empty());
7221 
7222  for(size_t i = GetMemoryTypeCount(); i--; )
7223  {
7224  vma_delete(this, m_pDedicatedAllocations[i]);
7225  vma_delete(this, m_pBlockVectors[i]);
7226  }
7227 }
7228 
7229 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7230 {
7231 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7232  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7233  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7234  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7235  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7236  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7237  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7238  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7239  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7240  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7241  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7242  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7243  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7244  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7245  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7246  if(m_UseKhrDedicatedAllocation)
7247  {
7248  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7249  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7250  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7251  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7252  }
7253 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7254 
7255 #define VMA_COPY_IF_NOT_NULL(funcName) \
7256  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
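// The macro overwrites a function pointer only when the user supplied a
// non-null replacement, so any pointers imported statically above remain
// as defaults.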
7257 
7258  if(pVulkanFunctions != VMA_NULL)
7259  {
7260  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7261  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7262  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7263  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7264  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7265  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7266  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7267  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7268  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7269  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7270  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7271  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7272  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7273  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7274  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7275  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7276  }
7277 
7278 #undef VMA_COPY_IF_NOT_NULL
7279 
7280  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7281  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7282  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7283  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7284  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7285  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7286  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7287  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7288  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7289  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7290  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7291  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7292  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7293  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7294  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7295  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7296  if(m_UseKhrDedicatedAllocation)
7297  {
7298  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7299  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7300  }
7301 }
7302 
7303 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7304 {
7305  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7306  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7307  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7308  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7309 }
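/*
Example of the heuristic above, assuming the library's default values
(VMA_SMALL_HEAP_MAX_SIZE = 512 MiB, default large-heap block size = 256 MiB,
both overridable): a 256 MiB heap counts as small, so its preferred block size
is 256 / 8 = 32 MiB, while an 8 GiB heap uses the full preferred block size of
256 MiB.
*/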
7310 
7311 VkResult VmaAllocator_T::AllocateMemoryOfType(
7312  const VkMemoryRequirements& vkMemReq,
7313  bool dedicatedAllocation,
7314  VkBuffer dedicatedBuffer,
7315  VkImage dedicatedImage,
7316  const VmaAllocationCreateInfo& createInfo,
7317  uint32_t memTypeIndex,
7318  VmaSuballocationType suballocType,
7319  VmaAllocation* pAllocation)
7320 {
7321  VMA_ASSERT(pAllocation != VMA_NULL);
7322  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7323 
7324  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7325 
7326  // If memory type is not HOST_VISIBLE, disable MAPPED.
7327  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7328  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7329  {
7330  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7331  }
7332 
7333  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7334  VMA_ASSERT(blockVector);
7335 
7336  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7337  bool preferDedicatedMemory =
7338  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7339  dedicatedAllocation ||
7340  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7341  vkMemReq.size > preferredBlockSize / 2;
7342 
7343  if(preferDedicatedMemory &&
7344  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7345  finalCreateInfo.pool == VK_NULL_HANDLE)
7346  {
7347  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7348  }
7349 
7350  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7351  {
7352  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7353  {
7354  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7355  }
7356  else
7357  {
7358  return AllocateDedicatedMemory(
7359  vkMemReq.size,
7360  suballocType,
7361  memTypeIndex,
7362  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7363  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7364  finalCreateInfo.pUserData,
7365  dedicatedBuffer,
7366  dedicatedImage,
7367  pAllocation);
7368  }
7369  }
7370  else
7371  {
7372  VkResult res = blockVector->Allocate(
7373  VK_NULL_HANDLE, // hCurrentPool
7374  m_CurrentFrameIndex.load(),
7375  vkMemReq,
7376  finalCreateInfo,
7377  suballocType,
7378  pAllocation);
7379  if(res == VK_SUCCESS)
7380  {
7381  return res;
7382  }
7383 
7384  // Allocation from existing blocks failed. Try dedicated memory.
7385  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7386  {
7387  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7388  }
7389  else
7390  {
7391  res = AllocateDedicatedMemory(
7392  vkMemReq.size,
7393  suballocType,
7394  memTypeIndex,
7395  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7396  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7397  finalCreateInfo.pUserData,
7398  dedicatedBuffer,
7399  dedicatedImage,
7400  pAllocation);
7401  if(res == VK_SUCCESS)
7402  {
7403  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
7404  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7405  return VK_SUCCESS;
7406  }
7407  else
7408  {
7409  // Everything failed: Return error code.
7410  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7411  return res;
7412  }
7413  }
7414  }
7415 }
7416 
7417 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7418  VkDeviceSize size,
7419  VmaSuballocationType suballocType,
7420  uint32_t memTypeIndex,
7421  bool map,
7422  bool isUserDataString,
7423  void* pUserData,
7424  VkBuffer dedicatedBuffer,
7425  VkImage dedicatedImage,
7426  VmaAllocation* pAllocation)
7427 {
7428  VMA_ASSERT(pAllocation);
7429 
7430  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7431  allocInfo.memoryTypeIndex = memTypeIndex;
7432  allocInfo.allocationSize = size;
7433 
7434  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7435  if(m_UseKhrDedicatedAllocation)
7436  {
7437  if(dedicatedBuffer != VK_NULL_HANDLE)
7438  {
7439  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7440  dedicatedAllocInfo.buffer = dedicatedBuffer;
7441  allocInfo.pNext = &dedicatedAllocInfo;
7442  }
7443  else if(dedicatedImage != VK_NULL_HANDLE)
7444  {
7445  dedicatedAllocInfo.image = dedicatedImage;
7446  allocInfo.pNext = &dedicatedAllocInfo;
7447  }
7448  }
7449 
7450  // Allocate VkDeviceMemory.
7451  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7452  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7453  if(res < 0)
7454  {
7455  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7456  return res;
7457  }
7458 
7459  void* pMappedData = VMA_NULL;
7460  if(map)
7461  {
7462  res = (*m_VulkanFunctions.vkMapMemory)(
7463  m_hDevice,
7464  hMemory,
7465  0,
7466  VK_WHOLE_SIZE,
7467  0,
7468  &pMappedData);
7469  if(res < 0)
7470  {
7471  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7472  FreeVulkanMemory(memTypeIndex, size, hMemory);
7473  return res;
7474  }
7475  }
7476 
7477  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7478  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7479  (*pAllocation)->SetUserData(this, pUserData);
7480 
7481  // Register it in m_pDedicatedAllocations.
7482  {
7483  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7484  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7485  VMA_ASSERT(pDedicatedAllocations);
7486  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7487  }
7488 
7489  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7490 
7491  return VK_SUCCESS;
7492 }
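/*
A minimal usage sketch for reaching the dedicated-memory path above through
the public API (assumes a valid VmaAllocator `allocator` and a filled
VkBufferCreateInfo `bufCreateInfo`):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buf, &alloc, nullptr);
\endcode
*/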
7493 
7494 void VmaAllocator_T::GetBufferMemoryRequirements(
7495  VkBuffer hBuffer,
7496  VkMemoryRequirements& memReq,
7497  bool& requiresDedicatedAllocation,
7498  bool& prefersDedicatedAllocation) const
7499 {
7500  if(m_UseKhrDedicatedAllocation)
7501  {
7502  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7503  memReqInfo.buffer = hBuffer;
7504 
7505  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7506 
7507  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7508  memReq2.pNext = &memDedicatedReq;
7509 
7510  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7511 
7512  memReq = memReq2.memoryRequirements;
7513  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7514  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7515  }
7516  else
7517  {
7518  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7519  requiresDedicatedAllocation = false;
7520  prefersDedicatedAllocation = false;
7521  }
7522 }
7523 
7524 void VmaAllocator_T::GetImageMemoryRequirements(
7525  VkImage hImage,
7526  VkMemoryRequirements& memReq,
7527  bool& requiresDedicatedAllocation,
7528  bool& prefersDedicatedAllocation) const
7529 {
7530  if(m_UseKhrDedicatedAllocation)
7531  {
7532  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7533  memReqInfo.image = hImage;
7534 
7535  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7536 
7537  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7538  memReq2.pNext = &memDedicatedReq;
7539 
7540  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7541 
7542  memReq = memReq2.memoryRequirements;
7543  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7544  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7545  }
7546  else
7547  {
7548  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7549  requiresDedicatedAllocation = false;
7550  prefersDedicatedAllocation = false;
7551  }
7552 }
7553 
7554 VkResult VmaAllocator_T::AllocateMemory(
7555  const VkMemoryRequirements& vkMemReq,
7556  bool requiresDedicatedAllocation,
7557  bool prefersDedicatedAllocation,
7558  VkBuffer dedicatedBuffer,
7559  VkImage dedicatedImage,
7560  const VmaAllocationCreateInfo& createInfo,
7561  VmaSuballocationType suballocType,
7562  VmaAllocation* pAllocation)
7563 {
7564  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7565  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7566  {
7567  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7568  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7569  }
7570  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7571  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7572  {
7573  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7574  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7575  }
7576  if(requiresDedicatedAllocation)
7577  {
7578  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7579  {
7580  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7581  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7582  }
7583  if(createInfo.pool != VK_NULL_HANDLE)
7584  {
7585  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7586  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7587  }
7588  }
7589  if((createInfo.pool != VK_NULL_HANDLE) &&
7590  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7591  {
7592  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7593  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7594  }
7595 
7596  if(createInfo.pool != VK_NULL_HANDLE)
7597  {
7598  return createInfo.pool->m_BlockVector.Allocate(
7599  createInfo.pool,
7600  m_CurrentFrameIndex.load(),
7601  vkMemReq,
7602  createInfo,
7603  suballocType,
7604  pAllocation);
7605  }
7606  else
7607  {
7608  // Bit mask of Vulkan memory types acceptable for this allocation.
7609  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7610  uint32_t memTypeIndex = UINT32_MAX;
7611  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7612  if(res == VK_SUCCESS)
7613  {
7614  res = AllocateMemoryOfType(
7615  vkMemReq,
7616  requiresDedicatedAllocation || prefersDedicatedAllocation,
7617  dedicatedBuffer,
7618  dedicatedImage,
7619  createInfo,
7620  memTypeIndex,
7621  suballocType,
7622  pAllocation);
7623  // Succeeded on first try.
7624  if(res == VK_SUCCESS)
7625  {
7626  return res;
7627  }
7628  // Allocation from this memory type failed. Try other compatible memory types.
7629  else
7630  {
7631  for(;;)
7632  {
7633  // Remove old memTypeIndex from list of possibilities.
7634  memoryTypeBits &= ~(1u << memTypeIndex);
7635  // Find alternative memTypeIndex.
7636  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7637  if(res == VK_SUCCESS)
7638  {
7639  res = AllocateMemoryOfType(
7640  vkMemReq,
7641  requiresDedicatedAllocation || prefersDedicatedAllocation,
7642  dedicatedBuffer,
7643  dedicatedImage,
7644  createInfo,
7645  memTypeIndex,
7646  suballocType,
7647  pAllocation);
7648  // Allocation from this alternative memory type succeeded.
7649  if(res == VK_SUCCESS)
7650  {
7651  return res;
7652  }
7653  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7654  }
7655  // No other matching memory type index could be found.
7656  else
7657  {
7658  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7659  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7660  }
7661  }
7662  }
7663  }
7664  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7665  else
7666  return res;
7667  }
7668 }
7669 
7670 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7671 {
7672  VMA_ASSERT(allocation);
7673 
7674  if(allocation->CanBecomeLost() == false ||
7675  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7676  {
7677  switch(allocation->GetType())
7678  {
7679  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7680  {
7681  VmaBlockVector* pBlockVector = VMA_NULL;
7682  VmaPool hPool = allocation->GetPool();
7683  if(hPool != VK_NULL_HANDLE)
7684  {
7685  pBlockVector = &hPool->m_BlockVector;
7686  }
7687  else
7688  {
7689  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7690  pBlockVector = m_pBlockVectors[memTypeIndex];
7691  }
7692  pBlockVector->Free(allocation);
7693  }
7694  break;
7695  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7696  FreeDedicatedMemory(allocation);
7697  break;
7698  default:
7699  VMA_ASSERT(0);
7700  }
7701  }
7702 
7703  allocation->SetUserData(this, VMA_NULL);
7704  vma_delete(this, allocation);
7705 }
7706 
7707 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7708 {
7709  // Initialize.
7710  InitStatInfo(pStats->total);
7711  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7712  InitStatInfo(pStats->memoryType[i]);
7713  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7714  InitStatInfo(pStats->memoryHeap[i]);
7715 
7716  // Process default pools.
7717  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7718  {
7719  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7720  VMA_ASSERT(pBlockVector);
7721  pBlockVector->AddStats(pStats);
7722  }
7723 
7724  // Process custom pools.
7725  {
7726  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7727  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7728  {
7729  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7730  }
7731  }
7732 
7733  // Process dedicated allocations.
7734  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7735  {
7736  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7737  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7738  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7739  VMA_ASSERT(pDedicatedAllocVector);
7740  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7741  {
7742  VmaStatInfo allocationStatInfo;
7743  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7744  VmaAddStatInfo(pStats->total, allocationStatInfo);
7745  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7746  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7747  }
7748  }
7749 
7750  // Postprocess.
7751  VmaPostprocessCalcStatInfo(pStats->total);
7752  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7753  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7754  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7755  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7756 }
7757 
7758 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // = 0x1002
7759 
7760 VkResult VmaAllocator_T::Defragment(
7761  VmaAllocation* pAllocations,
7762  size_t allocationCount,
7763  VkBool32* pAllocationsChanged,
7764  const VmaDefragmentationInfo* pDefragmentationInfo,
7765  VmaDefragmentationStats* pDefragmentationStats)
7766 {
7767  if(pAllocationsChanged != VMA_NULL)
7768  {
7769  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
7770  }
7771  if(pDefragmentationStats != VMA_NULL)
7772  {
7773  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7774  }
7775 
7776  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7777 
7778  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7779 
7780  const size_t poolCount = m_Pools.size();
7781 
7782  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7783  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7784  {
7785  VmaAllocation hAlloc = pAllocations[allocIndex];
7786  VMA_ASSERT(hAlloc);
7787  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7788  // DedicatedAlloc cannot be defragmented.
7789  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7790  // Only HOST_VISIBLE memory types can be defragmented.
7791  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7792  // Lost allocation cannot be defragmented.
7793  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7794  {
7795  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7796 
7797  const VmaPool hAllocPool = hAlloc->GetPool();
7798  // This allocation belongs to a custom pool.
7799  if(hAllocPool != VK_NULL_HANDLE)
7800  {
7801  pAllocBlockVector = &hAllocPool->GetBlockVector();
7802  }
7803  // This allocation belongs to the general pool.
7804  else
7805  {
7806  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7807  }
7808 
7809  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7810 
7811  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7812  &pAllocationsChanged[allocIndex] : VMA_NULL;
7813  pDefragmentator->AddAllocation(hAlloc, pChanged);
7814  }
7815  }
7816 
7817  VkResult result = VK_SUCCESS;
7818 
7819  // ======== Main processing.
7820 
7821  VkDeviceSize maxBytesToMove = SIZE_MAX;
7822  uint32_t maxAllocationsToMove = UINT32_MAX;
7823  if(pDefragmentationInfo != VMA_NULL)
7824  {
7825  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7826  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7827  }
7828 
7829  // Process standard memory.
7830  for(uint32_t memTypeIndex = 0;
7831  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7832  ++memTypeIndex)
7833  {
7834  // Only HOST_VISIBLE memory types can be defragmented.
7835  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7836  {
7837  result = m_pBlockVectors[memTypeIndex]->Defragment(
7838  pDefragmentationStats,
7839  maxBytesToMove,
7840  maxAllocationsToMove);
7841  }
7842  }
7843 
7844  // Process custom pools.
7845  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7846  {
7847  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7848  pDefragmentationStats,
7849  maxBytesToMove,
7850  maxAllocationsToMove);
7851  }
7852 
7853  // ======== Destroy defragmentators.
7854 
7855  // Process custom pools.
7856  for(size_t poolIndex = poolCount; poolIndex--; )
7857  {
7858  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7859  }
7860 
7861  // Process standard memory.
7862  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7863  {
7864  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7865  {
7866  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7867  }
7868  }
7869 
7870  return result;
7871 }
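/*
Usage sketch for the public vmaDefragment() entry point that drives this
function (assumes `allocations` holds `allocCount` block allocations in
HOST_VISIBLE memory; dedicated and lost allocations are skipped, as above):

\code
std::vector<VkBool32> changed(allocCount); // requires <vector>
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(allocator, allocations, allocCount,
    changed.data(), nullptr, &stats);
// For each allocation with changed[i] == VK_TRUE, the caller must destroy the
// old buffer/image, create a new one, and bind it at the allocation's new
// memory/offset.
\endcode
*/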
7872 
7873 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7874 {
7875  if(hAllocation->CanBecomeLost())
7876  {
7877  /*
7878  Warning: This is a carefully designed algorithm.
7879  Do not modify unless you really know what you're doing :)
7880  */
7881  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7882  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7883  for(;;)
7884  {
7885  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7886  {
7887  pAllocationInfo->memoryType = UINT32_MAX;
7888  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7889  pAllocationInfo->offset = 0;
7890  pAllocationInfo->size = hAllocation->GetSize();
7891  pAllocationInfo->pMappedData = VMA_NULL;
7892  pAllocationInfo->pUserData = hAllocation->GetUserData();
7893  return;
7894  }
7895  else if(localLastUseFrameIndex == localCurrFrameIndex)
7896  {
7897  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7898  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7899  pAllocationInfo->offset = hAllocation->GetOffset();
7900  pAllocationInfo->size = hAllocation->GetSize();
7901  pAllocationInfo->pMappedData = VMA_NULL;
7902  pAllocationInfo->pUserData = hAllocation->GetUserData();
7903  return;
7904  }
7905  else // Last use time earlier than current time.
7906  {
7907  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7908  {
7909  localLastUseFrameIndex = localCurrFrameIndex;
7910  }
7911  }
7912  }
7913  }
7914  else
7915  {
7916  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7917  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7918  pAllocationInfo->offset = hAllocation->GetOffset();
7919  pAllocationInfo->size = hAllocation->GetSize();
7920  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7921  pAllocationInfo->pUserData = hAllocation->GetUserData();
7922  }
7923 }
7924 
7925 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7926 {
7927  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7928  if(hAllocation->CanBecomeLost())
7929  {
7930  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7931  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7932  for(;;)
7933  {
7934  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7935  {
7936  return false;
7937  }
7938  else if(localLastUseFrameIndex == localCurrFrameIndex)
7939  {
7940  return true;
7941  }
7942  else // Last use time earlier than current time.
7943  {
7944  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7945  {
7946  localLastUseFrameIndex = localCurrFrameIndex;
7947  }
7948  }
7949  }
7950  }
7951  else
7952  {
7953  return true;
7954  }
7955 }
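/*
Usage sketch for the lost-allocation protocol that GetAllocationInfo() and
TouchAllocation() implement (assumes an allocation `alloc` created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, its buffer `buf`, and a per-frame
counter `frameIndex`):

\code
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation is lost: free it and recreate the resource.
    vmaDestroyBuffer(allocator, buf, alloc);
    // ... recreate buf and alloc here ...
}
\endcode
*/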
7956 
7957 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7958 {
7959  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7960 
7961  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7962 
7963  if(newCreateInfo.maxBlockCount == 0)
7964  {
7965  newCreateInfo.maxBlockCount = SIZE_MAX;
7966  }
7967  if(newCreateInfo.blockSize == 0)
7968  {
7969  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7970  }
7971 
7972  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7973 
7974  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7975  if(res != VK_SUCCESS)
7976  {
7977  vma_delete(this, *pPool);
7978  *pPool = VMA_NULL;
7979  return res;
7980  }
7981 
7982  // Add to m_Pools.
7983  {
7984  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7985  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7986  }
7987 
7988  return VK_SUCCESS;
7989 }
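/*
Usage sketch for creating a custom pool via the public API (assumes
`memTypeIndex` was obtained from vmaFindMemoryTypeIndex()). Leaving blockSize
and maxBlockCount at zero picks the defaults computed above:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 0;     // 0 = use CalcPreferredBlockSize() result.
poolCreateInfo.maxBlockCount = 0; // 0 = no limit (becomes SIZE_MAX above).

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode
*/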
7990 
7991 void VmaAllocator_T::DestroyPool(VmaPool pool)
7992 {
7993  // Remove from m_Pools.
7994  {
7995  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7996  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7997  VMA_ASSERT(success && "Pool not found in Allocator.");
7998  }
7999 
8000  vma_delete(this, pool);
8001 }
8002 
8003 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8004 {
8005  pool->m_BlockVector.GetPoolStats(pPoolStats);
8006 }
8007 
8008 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8009 {
8010  m_CurrentFrameIndex.store(frameIndex);
8011 }
8012 
8013 void VmaAllocator_T::MakePoolAllocationsLost(
8014  VmaPool hPool,
8015  size_t* pLostAllocationCount)
8016 {
8017  hPool->m_BlockVector.MakePoolAllocationsLost(
8018  m_CurrentFrameIndex.load(),
8019  pLostAllocationCount);
8020 }
8021 
8022 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8023 {
8024  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8025  (*pAllocation)->InitLost();
8026 }
8027 
8028 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8029 {
8030  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8031 
8032  VkResult res;
8033  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8034  {
8035  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8036  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8037  {
8038  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8039  if(res == VK_SUCCESS)
8040  {
8041  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8042  }
8043  }
8044  else
8045  {
8046  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8047  }
8048  }
8049  else
8050  {
8051  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8052  }
8053 
8054  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8055  {
8056  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8057  }
8058 
8059  return res;
8060 }
8061 
8062 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8063 {
8064  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8065  {
8066  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8067  }
8068 
8069  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8070 
8071  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8072  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8073  {
8074  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8075  m_HeapSizeLimit[heapIndex] += size;
8076  }
8077 }
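/*
The m_HeapSizeLimit accounting in AllocateVulkanMemory() and FreeVulkanMemory()
implements VmaAllocatorCreateInfo::pHeapSizeLimit. A sketch of capping heap 0
at 1 GiB while leaving the other heaps unlimited (VK_WHOLE_SIZE means no limit;
`physicalDevice` and `device` are assumed valid):

\code
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
{
    heapSizeLimit[i] = VK_WHOLE_SIZE;
}
heapSizeLimit[0] = 1024ull * 1024 * 1024;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapSizeLimit;
\endcode
*/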
8078 
8079 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8080 {
8081  if(hAllocation->CanBecomeLost())
8082  {
8083  return VK_ERROR_MEMORY_MAP_FAILED;
8084  }
8085 
8086  switch(hAllocation->GetType())
8087  {
8088  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8089  {
8090  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8091  char *pBytes = VMA_NULL;
8092  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8093  if(res == VK_SUCCESS)
8094  {
8095  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8096  hAllocation->BlockAllocMap();
8097  }
8098  return res;
8099  }
8100  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8101  return hAllocation->DedicatedAllocMap(this, ppData);
8102  default:
8103  VMA_ASSERT(0);
8104  return VK_ERROR_MEMORY_MAP_FAILED;
8105  }
8106 }
8107 
8108 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8109 {
8110  switch(hAllocation->GetType())
8111  {
8112  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8113  {
8114  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8115  hAllocation->BlockAllocUnmap();
8116  pBlock->Unmap(this, 1);
8117  }
8118  break;
8119  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8120  hAllocation->DedicatedAllocUnmap(this);
8121  break;
8122  default:
8123  VMA_ASSERT(0);
8124  }
8125 }
8126 
8127 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8128 {
8129  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8130 
8131  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8132  {
8133  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8134  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8135  VMA_ASSERT(pDedicatedAllocations);
8136  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8137  VMA_ASSERT(success);
8138  }
8139 
8140  VkDeviceMemory hMemory = allocation->GetMemory();
8141 
8142  if(allocation->GetMappedData() != VMA_NULL)
8143  {
8144  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8145  }
8146 
8147  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8148 
8149  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8150 }
8151 
8152 #if VMA_STATS_STRING_ENABLED
8153 
8154 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8155 {
8156  bool dedicatedAllocationsStarted = false;
8157  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8158  {
8159  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8160  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8161  VMA_ASSERT(pDedicatedAllocVector);
8162  if(pDedicatedAllocVector->empty() == false)
8163  {
8164  if(dedicatedAllocationsStarted == false)
8165  {
8166  dedicatedAllocationsStarted = true;
8167  json.WriteString("DedicatedAllocations");
8168  json.BeginObject();
8169  }
8170 
8171  json.BeginString("Type ");
8172  json.ContinueString(memTypeIndex);
8173  json.EndString();
8174 
8175  json.BeginArray();
8176 
8177  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8178  {
8179  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8180  json.BeginObject(true);
8181 
8182  json.WriteString("Type");
8183  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8184 
8185  json.WriteString("Size");
8186  json.WriteNumber(hAlloc->GetSize());
8187 
8188  const void* pUserData = hAlloc->GetUserData();
8189  if(pUserData != VMA_NULL)
8190  {
8191  json.WriteString("UserData");
8192  if(hAlloc->IsUserDataString())
8193  {
8194  json.WriteString((const char*)pUserData);
8195  }
8196  else
8197  {
8198  json.BeginString();
8199  json.ContinueString_Pointer(pUserData);
8200  json.EndString();
8201  }
8202  }
8203 
8204  json.EndObject();
8205  }
8206 
8207  json.EndArray();
8208  }
8209  }
8210  if(dedicatedAllocationsStarted)
8211  {
8212  json.EndObject();
8213  }
8214 
8215  {
8216  bool allocationsStarted = false;
8217  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8218  {
8219  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8220  {
8221  if(allocationsStarted == false)
8222  {
8223  allocationsStarted = true;
8224  json.WriteString("DefaultPools");
8225  json.BeginObject();
8226  }
8227 
8228  json.BeginString("Type ");
8229  json.ContinueString(memTypeIndex);
8230  json.EndString();
8231 
8232  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8233  }
8234  }
8235  if(allocationsStarted)
8236  {
8237  json.EndObject();
8238  }
8239  }
8240 
8241  {
8242  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8243  const size_t poolCount = m_Pools.size();
8244  if(poolCount > 0)
8245  {
8246  json.WriteString("Pools");
8247  json.BeginArray();
8248  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8249  {
8250  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8251  }
8252  json.EndArray();
8253  }
8254  }
8255 }
8256 
8257 #endif // #if VMA_STATS_STRING_ENABLED
8258 
8259 static VkResult AllocateMemoryForImage(
8260  VmaAllocator allocator,
8261  VkImage image,
8262  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8263  VmaSuballocationType suballocType,
8264  VmaAllocation* pAllocation)
8265 {
8266  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8267 
8268  VkMemoryRequirements vkMemReq = {};
8269  bool requiresDedicatedAllocation = false;
8270  bool prefersDedicatedAllocation = false;
8271  allocator->GetImageMemoryRequirements(image, vkMemReq,
8272  requiresDedicatedAllocation, prefersDedicatedAllocation);
8273 
8274  return allocator->AllocateMemory(
8275  vkMemReq,
8276  requiresDedicatedAllocation,
8277  prefersDedicatedAllocation,
8278  VK_NULL_HANDLE, // dedicatedBuffer
8279  image, // dedicatedImage
8280  *pAllocationCreateInfo,
8281  suballocType,
8282  pAllocation);
8283 }
8284 
8285 //////////////////////////////////////////////////////////////////////////////
8286 // Public interface
8287 
8288 VkResult vmaCreateAllocator(
8289  const VmaAllocatorCreateInfo* pCreateInfo,
8290  VmaAllocator* pAllocator)
8291 {
8292  VMA_ASSERT(pCreateInfo && pAllocator);
8293  VMA_DEBUG_LOG("vmaCreateAllocator");
8294  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8295  return VK_SUCCESS;
8296 }
8297 
8298 void vmaDestroyAllocator(
8299  VmaAllocator allocator)
8300 {
8301  if(allocator != VK_NULL_HANDLE)
8302  {
8303  VMA_DEBUG_LOG("vmaDestroyAllocator");
8304  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8305  vma_delete(&allocationCallbacks, allocator);
8306  }
8307 }
8308 
8309 void vmaGetPhysicalDeviceProperties(
8310  VmaAllocator allocator,
8311  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8312 {
8313  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8314  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8315 }
8316 
8317 void vmaGetMemoryProperties(
8318  VmaAllocator allocator,
8319  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8320 {
8321  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8322  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8323 }
8324 
8325 void vmaGetMemoryTypeProperties(
8326  VmaAllocator allocator,
8327  uint32_t memoryTypeIndex,
8328  VkMemoryPropertyFlags* pFlags)
8329 {
8330  VMA_ASSERT(allocator && pFlags);
8331  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8332  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8333 }
8334 
8335 void vmaSetCurrentFrameIndex(
8336  VmaAllocator allocator,
8337  uint32_t frameIndex)
8338 {
8339  VMA_ASSERT(allocator);
8340  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8341 
8342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8343 
8344  allocator->SetCurrentFrameIndex(frameIndex);
8345 }
8346 
8347 void vmaCalculateStats(
8348  VmaAllocator allocator,
8349  VmaStats* pStats)
8350 {
8351  VMA_ASSERT(allocator && pStats);
8352  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8353  allocator->CalculateStats(pStats);
8354 }
8355 
8356 #if VMA_STATS_STRING_ENABLED
8357 
8358 void vmaBuildStatsString(
8359  VmaAllocator allocator,
8360  char** ppStatsString,
8361  VkBool32 detailedMap)
8362 {
8363  VMA_ASSERT(allocator && ppStatsString);
8364  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8365 
8366  VmaStringBuilder sb(allocator);
8367  {
8368  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8369  json.BeginObject();
8370 
8371  VmaStats stats;
8372  allocator->CalculateStats(&stats);
8373 
8374  json.WriteString("Total");
8375  VmaPrintStatInfo(json, stats.total);
8376 
8377  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8378  {
8379  json.BeginString("Heap ");
8380  json.ContinueString(heapIndex);
8381  json.EndString();
8382  json.BeginObject();
8383 
8384  json.WriteString("Size");
8385  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8386 
8387  json.WriteString("Flags");
8388  json.BeginArray(true);
8389  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8390  {
8391  json.WriteString("DEVICE_LOCAL");
8392  }
8393  json.EndArray();
8394 
8395  if(stats.memoryHeap[heapIndex].blockCount > 0)
8396  {
8397  json.WriteString("Stats");
8398  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8399  }
8400 
8401  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8402  {
8403  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8404  {
8405  json.BeginString("Type ");
8406  json.ContinueString(typeIndex);
8407  json.EndString();
8408 
8409  json.BeginObject();
8410 
8411  json.WriteString("Flags");
8412  json.BeginArray(true);
8413  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8414  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8415  {
8416  json.WriteString("DEVICE_LOCAL");
8417  }
8418  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8419  {
8420  json.WriteString("HOST_VISIBLE");
8421  }
8422  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8423  {
8424  json.WriteString("HOST_COHERENT");
8425  }
8426  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8427  {
8428  json.WriteString("HOST_CACHED");
8429  }
8430  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8431  {
8432  json.WriteString("LAZILY_ALLOCATED");
8433  }
8434  json.EndArray();
8435 
8436  if(stats.memoryType[typeIndex].blockCount > 0)
8437  {
8438  json.WriteString("Stats");
8439  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8440  }
8441 
8442  json.EndObject();
8443  }
8444  }
8445 
8446  json.EndObject();
8447  }
8448  if(detailedMap == VK_TRUE)
8449  {
8450  allocator->PrintDetailedMap(json);
8451  }
8452 
8453  json.EndObject();
8454  }
8455 
8456  const size_t len = sb.GetLength();
8457  char* const pChars = vma_new_array(allocator, char, len + 1);
8458  if(len > 0)
8459  {
8460  memcpy(pChars, sb.GetData(), len);
8461  }
8462  pChars[len] = '\0';
8463  *ppStatsString = pChars;
8464 }
8465 
8466 void vmaFreeStatsString(
8467  VmaAllocator allocator,
8468  char* pStatsString)
8469 {
8470  if(pStatsString != VMA_NULL)
8471  {
8472  VMA_ASSERT(allocator);
8473  size_t len = strlen(pStatsString);
8474  vma_delete_array(allocator, pStatsString, len + 1);
8475  }
8476 }
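/*
Usage sketch: dump the allocator state as a JSON string and release it
(assumes a valid `allocator`; pass VK_TRUE to include the detailed map):

\code
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
printf("%s\n", statsString); // requires <cstdio>
vmaFreeStatsString(allocator, statsString);
\endcode
*/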
8477 
8478 #endif // #if VMA_STATS_STRING_ENABLED
8479 
8480 /*
8481 This function is not protected by any mutex because it just reads immutable data.
8482 */
8483 VkResult vmaFindMemoryTypeIndex(
8484  VmaAllocator allocator,
8485  uint32_t memoryTypeBits,
8486  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8487  uint32_t* pMemoryTypeIndex)
8488 {
8489  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8490  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8491  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8492 
8493  if(pAllocationCreateInfo->memoryTypeBits != 0)
8494  {
8495  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8496  }
8497 
8498  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8499  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8500 
8501  // Convert usage to requiredFlags and preferredFlags.
8502  switch(pAllocationCreateInfo->usage)
8503  {
8504  case VMA_MEMORY_USAGE_UNKNOWN:
8505  break;
8506  case VMA_MEMORY_USAGE_GPU_ONLY:
8507  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8508  break;
8509  case VMA_MEMORY_USAGE_CPU_ONLY:
8510  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8511  break;
8512  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8513  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8514  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8515  break;
8516  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8517  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8518  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8519  break;
8520  default:
8521  break;
8522  }
8523 
8524  *pMemoryTypeIndex = UINT32_MAX;
8525  uint32_t minCost = UINT32_MAX;
8526  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8527  memTypeIndex < allocator->GetMemoryTypeCount();
8528  ++memTypeIndex, memTypeBit <<= 1)
8529  {
8530  // This memory type is acceptable according to memoryTypeBits bitmask.
8531  if((memTypeBit & memoryTypeBits) != 0)
8532  {
8533  const VkMemoryPropertyFlags currFlags =
8534  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8535  // This memory type contains requiredFlags.
8536  if((requiredFlags & ~currFlags) == 0)
8537  {
8538  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8539  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8540  // Remember memory type with lowest cost.
8541  if(currCost < minCost)
8542  {
8543  *pMemoryTypeIndex = memTypeIndex;
8544  if(currCost == 0)
8545  {
8546  return VK_SUCCESS;
8547  }
8548  minCost = currCost;
8549  }
8550  }
8551  }
8552  }
8553  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8554 }
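/*
Worked example of the cost function above: with usage =
VMA_MEMORY_USAGE_CPU_TO_GPU, requiredFlags gains HOST_VISIBLE and
preferredFlags gains DEVICE_LOCAL. A memory type with
HOST_VISIBLE | HOST_COHERENT then costs 1 (DEVICE_LOCAL missing), while one
with DEVICE_LOCAL | HOST_VISIBLE costs 0 and is returned immediately. A direct
call (assumes `memReq` came from vkGetBufferMemoryRequirements or similar):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(
    allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
\endcode
*/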
8555 
8556 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8557  VmaAllocator allocator,
8558  const VkBufferCreateInfo* pBufferCreateInfo,
8559  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8560  uint32_t* pMemoryTypeIndex)
8561 {
8562  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8563  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8564  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8565  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8566 
8567  const VkDevice hDev = allocator->m_hDevice;
8568  VkBuffer hBuffer = VK_NULL_HANDLE;
8569  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8570  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8571  if(res == VK_SUCCESS)
8572  {
8573  VkMemoryRequirements memReq = {};
8574  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8575  hDev, hBuffer, &memReq);
8576 
8577  res = vmaFindMemoryTypeIndex(
8578  allocator,
8579  memReq.memoryTypeBits,
8580  pAllocationCreateInfo,
8581  pMemoryTypeIndex);
8582 
8583  allocator->GetVulkanFunctions().vkDestroyBuffer(
8584  hDev, hBuffer, allocator->GetAllocationCallbacks());
8585  }
8586  return res;
8587 }
8588 
8589 VkResult vmaFindMemoryTypeIndexForImageInfo(
8590  VmaAllocator allocator,
8591  const VkImageCreateInfo* pImageCreateInfo,
8592  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8593  uint32_t* pMemoryTypeIndex)
8594 {
8595  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8596  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8597  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8598  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8599 
8600  const VkDevice hDev = allocator->m_hDevice;
8601  VkImage hImage = VK_NULL_HANDLE;
8602  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8603  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8604  if(res == VK_SUCCESS)
8605  {
8606  VkMemoryRequirements memReq = {};
8607  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8608  hDev, hImage, &memReq);
8609 
8610  res = vmaFindMemoryTypeIndex(
8611  allocator,
8612  memReq.memoryTypeBits,
8613  pAllocationCreateInfo,
8614  pMemoryTypeIndex);
8615 
8616  allocator->GetVulkanFunctions().vkDestroyImage(
8617  hDev, hImage, allocator->GetAllocationCallbacks());
8618  }
8619  return res;
8620 }
8621 
8622 VkResult vmaCreatePool(
8623  VmaAllocator allocator,
8624  const VmaPoolCreateInfo* pCreateInfo,
8625  VmaPool* pPool)
8626 {
8627  VMA_ASSERT(allocator && pCreateInfo && pPool);
8628 
8629  VMA_DEBUG_LOG("vmaCreatePool");
8630 
8631  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8632 
8633  return allocator->CreatePool(pCreateInfo, pPool);
8634 }
8635 
8636 void vmaDestroyPool(
8637  VmaAllocator allocator,
8638  VmaPool pool)
8639 {
8640  VMA_ASSERT(allocator);
8641 
8642  if(pool == VK_NULL_HANDLE)
8643  {
8644  return;
8645  }
8646 
8647  VMA_DEBUG_LOG("vmaDestroyPool");
8648 
8649  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8650 
8651  allocator->DestroyPool(pool);
8652 }
8653 
8654 void vmaGetPoolStats(
8655  VmaAllocator allocator,
8656  VmaPool pool,
8657  VmaPoolStats* pPoolStats)
8658 {
8659  VMA_ASSERT(allocator && pool && pPoolStats);
8660 
8661  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8662 
8663  allocator->GetPoolStats(pool, pPoolStats);
8664 }
8665 
8666 void vmaMakePoolAllocationsLost(
8667  VmaAllocator allocator,
8668  VmaPool pool,
8669  size_t* pLostAllocationCount)
8670 {
8671  VMA_ASSERT(allocator && pool);
8672 
8673  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8674 
8675  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8676 }
8677 
8678 VkResult vmaAllocateMemory(
8679  VmaAllocator allocator,
8680  const VkMemoryRequirements* pVkMemoryRequirements,
8681  const VmaAllocationCreateInfo* pCreateInfo,
8682  VmaAllocation* pAllocation,
8683  VmaAllocationInfo* pAllocationInfo)
8684 {
8685  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8686 
8687  VMA_DEBUG_LOG("vmaAllocateMemory");
8688 
8689  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8690 
8691  VkResult result = allocator->AllocateMemory(
8692  *pVkMemoryRequirements,
8693  false, // requiresDedicatedAllocation
8694  false, // prefersDedicatedAllocation
8695  VK_NULL_HANDLE, // dedicatedBuffer
8696  VK_NULL_HANDLE, // dedicatedImage
8697  *pCreateInfo,
8698  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8699  pAllocation);
8700 
8701  if(pAllocationInfo && result == VK_SUCCESS)
8702  {
8703  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8704  }
8705 
8706  return result;
8707 }
8708 
8709 VkResult vmaAllocateMemoryForBuffer(
8710  VmaAllocator allocator,
8711  VkBuffer buffer,
8712  const VmaAllocationCreateInfo* pCreateInfo,
8713  VmaAllocation* pAllocation,
8714  VmaAllocationInfo* pAllocationInfo)
8715 {
8716  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8717 
8718  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8719 
8720  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8721 
8722  VkMemoryRequirements vkMemReq = {};
8723  bool requiresDedicatedAllocation = false;
8724  bool prefersDedicatedAllocation = false;
8725  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8726  requiresDedicatedAllocation,
8727  prefersDedicatedAllocation);
8728 
8729  VkResult result = allocator->AllocateMemory(
8730  vkMemReq,
8731  requiresDedicatedAllocation,
8732  prefersDedicatedAllocation,
8733  buffer, // dedicatedBuffer
8734  VK_NULL_HANDLE, // dedicatedImage
8735  *pCreateInfo,
8736  VMA_SUBALLOCATION_TYPE_BUFFER,
8737  pAllocation);
8738 
8739  if(pAllocationInfo && result == VK_SUCCESS)
8740  {
8741  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8742  }
8743 
8744  return result;
8745 }
8746 
8747 VkResult vmaAllocateMemoryForImage(
8748  VmaAllocator allocator,
8749  VkImage image,
8750  const VmaAllocationCreateInfo* pCreateInfo,
8751  VmaAllocation* pAllocation,
8752  VmaAllocationInfo* pAllocationInfo)
8753 {
8754  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8755 
8756  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8757 
8758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8759 
8760  VkResult result = AllocateMemoryForImage(
8761  allocator,
8762  image,
8763  pCreateInfo,
8764  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8765  pAllocation);
8766 
8767  if(pAllocationInfo && result == VK_SUCCESS)
8768  {
8769  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8770  }
8771 
8772  return result;
8773 }
8774 
8775 void vmaFreeMemory(
8776  VmaAllocator allocator,
8777  VmaAllocation allocation)
8778 {
8779  VMA_ASSERT(allocator && allocation);
8780 
8781  VMA_DEBUG_LOG("vmaFreeMemory");
8782 
8783  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8784 
8785  allocator->FreeMemory(allocation);
8786 }
8787 
8788 void vmaGetAllocationInfo(
8789  VmaAllocator allocator,
8790  VmaAllocation allocation,
8791  VmaAllocationInfo* pAllocationInfo)
8792 {
8793  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8794 
8795  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8796 
8797  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8798 }
8799 
8800 VkBool32 vmaTouchAllocation(
8801  VmaAllocator allocator,
8802  VmaAllocation allocation)
8803 {
8804  VMA_ASSERT(allocator && allocation);
8805 
8806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8807 
8808  return allocator->TouchAllocation(allocation);
8809 }
8810 
8811 void vmaSetAllocationUserData(
8812  VmaAllocator allocator,
8813  VmaAllocation allocation,
8814  void* pUserData)
8815 {
8816  VMA_ASSERT(allocator && allocation);
8817 
8818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8819 
8820  allocation->SetUserData(allocator, pUserData);
8821 }
8822 
8823 void vmaCreateLostAllocation(
8824  VmaAllocator allocator,
8825  VmaAllocation* pAllocation)
8826 {
8827  VMA_ASSERT(allocator && pAllocation);
8828 
8829  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8830 
8831  allocator->CreateLostAllocation(pAllocation);
8832 }
8833 
8834 VkResult vmaMapMemory(
8835  VmaAllocator allocator,
8836  VmaAllocation allocation,
8837  void** ppData)
8838 {
8839  VMA_ASSERT(allocator && allocation && ppData);
8840 
8841  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8842 
8843  return allocator->Map(allocation, ppData);
8844 }
8845 
8846 void vmaUnmapMemory(
8847  VmaAllocator allocator,
8848  VmaAllocation allocation)
8849 {
8850  VMA_ASSERT(allocator && allocation);
8851 
8852  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8853 
8854  allocator->Unmap(allocation);
8855 }
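/*
Usage sketch for the Map()/Unmap() pair above. Mapping is reference-counted
per VkDeviceMemory block (note the count of 1 passed to pBlock->Map), so
several allocations from the same block may be mapped at once (assumes
`srcData`/`srcSize` describe data to upload; memcpy requires <cstring>):

\code
void* pData;
VkResult res = vmaMapMemory(allocator, alloc, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, alloc);
}
\endcode
*/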
8856 
8857 VkResult vmaDefragment(
8858  VmaAllocator allocator,
8859  VmaAllocation* pAllocations,
8860  size_t allocationCount,
8861  VkBool32* pAllocationsChanged,
8862  const VmaDefragmentationInfo *pDefragmentationInfo,
8863  VmaDefragmentationStats* pDefragmentationStats)
8864 {
8865  VMA_ASSERT(allocator && pAllocations);
8866 
8867  VMA_DEBUG_LOG("vmaDefragment");
8868 
8869  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8870 
8871  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8872 }
8873 
8874 VkResult vmaCreateBuffer(
8875  VmaAllocator allocator,
8876  const VkBufferCreateInfo* pBufferCreateInfo,
8877  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8878  VkBuffer* pBuffer,
8879  VmaAllocation* pAllocation,
8880  VmaAllocationInfo* pAllocationInfo)
8881 {
8882  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8883 
8884  VMA_DEBUG_LOG("vmaCreateBuffer");
8885 
8886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8887 
8888  *pBuffer = VK_NULL_HANDLE;
8889  *pAllocation = VK_NULL_HANDLE;
8890 
8891  // 1. Create VkBuffer.
8892  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8893  allocator->m_hDevice,
8894  pBufferCreateInfo,
8895  allocator->GetAllocationCallbacks(),
8896  pBuffer);
8897  if(res >= 0)
8898  {
8899  // 2. vkGetBufferMemoryRequirements.
8900  VkMemoryRequirements vkMemReq = {};
8901  bool requiresDedicatedAllocation = false;
8902  bool prefersDedicatedAllocation = false;
8903  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8904  requiresDedicatedAllocation, prefersDedicatedAllocation);
8905 
8906  // Make sure alignment requirements for specific buffer usages reported
8907  // in Physical Device Properties are included in alignment reported by memory requirements.
8908  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8909  {
8910  VMA_ASSERT(vkMemReq.alignment %
8911  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8912  }
8913  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8914  {
8915  VMA_ASSERT(vkMemReq.alignment %
8916  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8917  }
8918  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8919  {
8920  VMA_ASSERT(vkMemReq.alignment %
8921  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8922  }
8923 
8924  // 3. Allocate memory using allocator.
8925  res = allocator->AllocateMemory(
8926  vkMemReq,
8927  requiresDedicatedAllocation,
8928  prefersDedicatedAllocation,
8929  *pBuffer, // dedicatedBuffer
8930  VK_NULL_HANDLE, // dedicatedImage
8931  *pAllocationCreateInfo,
8932  VMA_SUBALLOCATION_TYPE_BUFFER,
8933  pAllocation);
8934  if(res >= 0)
8935  {
8936  // 4. Bind buffer with memory.
8937  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8938  allocator->m_hDevice,
8939  *pBuffer,
8940  (*pAllocation)->GetMemory(),
8941  (*pAllocation)->GetOffset());
8942  if(res >= 0)
8943  {
8944  // All steps succeeded.
8945  if(pAllocationInfo != VMA_NULL)
8946  {
8947  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8948  }
8949  return VK_SUCCESS;
8950  }
8951  allocator->FreeMemory(*pAllocation);
8952  *pAllocation = VK_NULL_HANDLE;
8953  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8954  *pBuffer = VK_NULL_HANDLE;
8955  return res;
8956  }
8957  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8958  *pBuffer = VK_NULL_HANDLE;
8959  return res;
8960  }
8961  return res;
8962 }
8963 
8964 void vmaDestroyBuffer(
8965  VmaAllocator allocator,
8966  VkBuffer buffer,
8967  VmaAllocation allocation)
8968 {
8969  if(buffer != VK_NULL_HANDLE)
8970  {
8971  VMA_ASSERT(allocator);
8972 
8973  VMA_DEBUG_LOG("vmaDestroyBuffer");
8974 
8975  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8976 
8977  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8978 
8979  allocator->FreeMemory(allocation);
8980  }
8981 }
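/*
Usage sketch for the create -> allocate -> bind sequence implemented in
vmaCreateBuffer() above, with the matching cleanup call (assumes a valid
`allocator`; the size and usage values are arbitrary examples):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr);
// ... use the buffer ...
vmaDestroyBuffer(allocator, buffer, allocation);
\endcode
*/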
8982 
8983 VkResult vmaCreateImage(
8984  VmaAllocator allocator,
8985  const VkImageCreateInfo* pImageCreateInfo,
8986  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8987  VkImage* pImage,
8988  VmaAllocation* pAllocation,
8989  VmaAllocationInfo* pAllocationInfo)
8990 {
8991  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8992 
8993  VMA_DEBUG_LOG("vmaCreateImage");
8994 
8995  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8996 
8997  *pImage = VK_NULL_HANDLE;
8998  *pAllocation = VK_NULL_HANDLE;
8999 
9000  // 1. Create VkImage.
9001  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9002  allocator->m_hDevice,
9003  pImageCreateInfo,
9004  allocator->GetAllocationCallbacks(),
9005  pImage);
9006  if(res >= 0)
9007  {
9008  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9009  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9010  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9011 
9012  // 2. Allocate memory using allocator.
9013  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9014  if(res >= 0)
9015  {
9016  // 3. Bind image with memory.
9017  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
9018  allocator->m_hDevice,
9019  *pImage,
9020  (*pAllocation)->GetMemory(),
9021  (*pAllocation)->GetOffset());
9022  if(res >= 0)
9023  {
9024  // All steps succeeded.
9025  if(pAllocationInfo != VMA_NULL)
9026  {
9027  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9028  }
9029  return VK_SUCCESS;
9030  }
9031  allocator->FreeMemory(*pAllocation);
9032  *pAllocation = VK_NULL_HANDLE;
9033  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9034  *pImage = VK_NULL_HANDLE;
9035  return res;
9036  }
9037  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9038  *pImage = VK_NULL_HANDLE;
9039  return res;
9040  }
9041  return res;
9042 }
9043 
9044 void vmaDestroyImage(
9045  VmaAllocator allocator,
9046  VkImage image,
9047  VmaAllocation allocation)
9048 {
9049  if(image != VK_NULL_HANDLE)
9050  {
9051  VMA_ASSERT(allocator);
9052 
9053  VMA_DEBUG_LOG("vmaDestroyImage");
9054 
9055  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9056 
9057  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9058 
9059  allocator->FreeMemory(allocation);
9060  }
9061 }
9062 
9063 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:1458
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1494
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:995
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1445
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1193
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1753
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1771
Definition: vk_mem_alloc.h:1232
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1341
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1010
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1161
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:945
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:966
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:971
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1773
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1328
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1504
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1005
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1144
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1453
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:958
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1302
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1157
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:962
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1448
Definition: vk_mem_alloc.h:1241
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1323
Definition: vk_mem_alloc.h:1314
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1147
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1007
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1466
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1041
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1497
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1312
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1347
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1079
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1163
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1282
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1156
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1016
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:960
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1015
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1480
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1588
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1035
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1156
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1153
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1485
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1569
Definition: vk_mem_alloc.h:1310
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1769
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1003
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1018
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1151
Definition: vk_mem_alloc.h:1198
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1438
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1149
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1013
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1017
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1269
Definition: vk_mem_alloc.h:1225
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1583
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:993
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1006
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1550
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1416
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1157
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1164
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1491
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1157
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1555