Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
943 #include <vulkan/vulkan.h>
944 
945 VK_DEFINE_HANDLE(VmaAllocator)
946 
947 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
949  VmaAllocator allocator,
950  uint32_t memoryType,
951  VkDeviceMemory memory,
952  VkDeviceSize size);
954 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
955  VmaAllocator allocator,
956  uint32_t memoryType,
957  VkDeviceMemory memory,
958  VkDeviceSize size);
959 
967 typedef struct VmaDeviceMemoryCallbacks {
969  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
971  PFN_vmaFreeDeviceMemoryFunction pfnFree;
972 } VmaDeviceMemoryCallbacks;
973 
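/* Illustrative sketch (not part of the original header): hooking the informative
device-memory callbacks, e.g. to log every vkAllocateMemory/vkFreeMemory the
library makes. The callback names are assumptions for the example:

    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }
    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Freed %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }

    VmaDeviceMemoryCallbacks callbacks = {};
    callbacks.pfnAllocate = MyAllocateCallback;
    callbacks.pfnFree = MyFreeCallback;
    // Assign to VmaAllocatorCreateInfo::pDeviceMemoryCallbacks before vmaCreateAllocator().
*/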
976 typedef enum VmaAllocatorCreateFlagBits {
981  VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
999  VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
1001  VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1002 } VmaAllocatorCreateFlagBits;
1003 
1006 typedef VkFlags VmaAllocatorCreateFlags;
1007 
1012 typedef struct VmaVulkanFunctions {
1013  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1014  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1015  PFN_vkAllocateMemory vkAllocateMemory;
1016  PFN_vkFreeMemory vkFreeMemory;
1017  PFN_vkMapMemory vkMapMemory;
1018  PFN_vkUnmapMemory vkUnmapMemory;
1019  PFN_vkBindBufferMemory vkBindBufferMemory;
1020  PFN_vkBindImageMemory vkBindImageMemory;
1021  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1022  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1023  PFN_vkCreateBuffer vkCreateBuffer;
1024  PFN_vkDestroyBuffer vkDestroyBuffer;
1025  PFN_vkCreateImage vkCreateImage;
1026  PFN_vkDestroyImage vkDestroyImage;
1027  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1028  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1029 } VmaVulkanFunctions;
1030 
1032 typedef struct VmaAllocatorCreateInfo
1033 {
1035  VmaAllocatorCreateFlags flags;
1037 
1038  VkPhysicalDevice physicalDevice;
1040 
1041  VkDevice device;
1043 
1045  VkDeviceSize preferredLargeHeapBlockSize;
1046 
1047  const VkAllocationCallbacks* pAllocationCallbacks;
1049 
1053  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
1062  uint32_t frameInUseCount;
1088  const VkDeviceSize* pHeapSizeLimit;
1100  const VmaVulkanFunctions* pVulkanFunctions;
1101 } VmaAllocatorCreateInfo;
1102 
1104 VkResult vmaCreateAllocator(
1105  const VmaAllocatorCreateInfo* pCreateInfo,
1106  VmaAllocator* pAllocator);
1107 
1109 void vmaDestroyAllocator(
1110  VmaAllocator allocator);
1111 
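/* Minimal usage sketch (illustrative, not part of the original header): creating
and destroying an allocator. Assumes `physicalDevice` and `device` were created
by the application beforehand:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers and images through the allocator ...
    vmaDestroyAllocator(allocator); // after all allocations have been freed
*/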
1116 void vmaGetPhysicalDeviceProperties(
1117  VmaAllocator allocator,
1118  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1119 
1124 void vmaGetMemoryProperties(
1125  VmaAllocator allocator,
1126  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1127 
1134 void vmaGetMemoryTypeProperties(
1135  VmaAllocator allocator,
1136  uint32_t memoryTypeIndex,
1137  VkMemoryPropertyFlags* pFlags);
1138 
1147 void vmaSetCurrentFrameIndex(
1148  VmaAllocator allocator,
1149  uint32_t frameIndex);
1150 
1153 typedef struct VmaStatInfo
1154 {
1156  uint32_t blockCount;
1158  uint32_t allocationCount;
1160  uint32_t unusedRangeCount;
1162  VkDeviceSize usedBytes;
1164  VkDeviceSize unusedBytes;
1165  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1166  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1167 } VmaStatInfo;
1168 
1170 typedef struct VmaStats
1171 {
1172  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1173  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1174  VmaStatInfo total;
1175 } VmaStats;
1176 
1178 void vmaCalculateStats(
1179  VmaAllocator allocator,
1180  VmaStats* pStats);
1181 
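/* Illustrative sketch: querying aggregated statistics. `total` is the
whole-allocator summary; the per-memory-type and per-heap entries are also filled:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B, unused: %llu B\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);
*/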
1182 #define VMA_STATS_STRING_ENABLED 1
1183 
1184 #if VMA_STATS_STRING_ENABLED
1185 
1187 
1189 void vmaBuildStatsString(
1190  VmaAllocator allocator,
1191  char** ppStatsString,
1192  VkBool32 detailedMap);
1193 
1194 void vmaFreeStatsString(
1195  VmaAllocator allocator,
1196  char* pStatsString);
1197 
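/* Illustrative sketch: dumping the allocator state as a JSON string, e.g. for
offline inspection. Pass VK_TRUE as detailedMap for a per-allocation dump:

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write statsString to a file ...
    vmaFreeStatsString(allocator, statsString);
*/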
1198 #endif // #if VMA_STATS_STRING_ENABLED
1199 
1200 VK_DEFINE_HANDLE(VmaPool)
1201 
1202 typedef enum VmaMemoryUsage
1203 {
1206  VMA_MEMORY_USAGE_UNKNOWN = 0,
1216  VMA_MEMORY_USAGE_GPU_ONLY = 1,
1226  VMA_MEMORY_USAGE_CPU_ONLY = 2,
1235  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
1243  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
1251  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
1252 } VmaMemoryUsage;
1253 
1268 
1270 typedef enum VmaAllocationCreateFlagBits {
1277  VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
1284  VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
1292  VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
1305  VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
1310  VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
1315  VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
1317  VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1318 } VmaAllocationCreateFlagBits;
1321 typedef VkFlags VmaAllocationCreateFlags;
1322 
1323 typedef struct VmaAllocationCreateInfo
1324 {
1326  VmaAllocationCreateFlags flags;
1331  VmaMemoryUsage usage;
1337  VkMemoryPropertyFlags requiredFlags;
1342  VkMemoryPropertyFlags preferredFlags;
1350  uint32_t memoryTypeBits;
1356  VmaPool pool;
1363  void* pUserData;
1364 } VmaAllocationCreateInfo;
1365 
1382 VkResult vmaFindMemoryTypeIndex(
1383  VmaAllocator allocator,
1384  uint32_t memoryTypeBits,
1385  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1386  uint32_t* pMemoryTypeIndex);
1387 
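/* Illustrative sketch: choosing a memory type for a host-visible staging buffer.
`memRequirements` is assumed to come from vkGetBufferMemoryRequirements:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memRequirements.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/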
1400 VkResult vmaFindMemoryTypeIndexForBufferInfo(
1401  VmaAllocator allocator,
1402  const VkBufferCreateInfo* pBufferCreateInfo,
1403  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1404  uint32_t* pMemoryTypeIndex);
1405 
1418 VkResult vmaFindMemoryTypeIndexForImageInfo(
1419  VmaAllocator allocator,
1420  const VkImageCreateInfo* pImageCreateInfo,
1421  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1422  uint32_t* pMemoryTypeIndex);
1423 
1444 
1447 typedef VkFlags VmaPoolCreateFlags;
1448 
1451 typedef struct VmaPoolCreateInfo {
1454  uint32_t memoryTypeIndex;
1457  VmaPoolCreateFlags flags;
1462  VkDeviceSize blockSize;
1468  size_t minBlockCount;
1473  size_t maxBlockCount;
1486  uint32_t frameInUseCount;
1490 } VmaPoolCreateInfo;
1491 
1494 typedef struct VmaPoolStats {
1497  VkDeviceSize size;
1500  VkDeviceSize unusedSize;
1503  size_t allocationCount;
1506  size_t unusedRangeCount;
1513  VkDeviceSize unusedRangeSizeMax;
1514 } VmaPoolStats;
1515 
1522 VkResult vmaCreatePool(
1523  VmaAllocator allocator,
1524  const VmaPoolCreateInfo* pCreateInfo,
1525  VmaPool* pPool);
1526 
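/* Illustrative sketch: creating a custom pool for a specific memory type.
`memTypeIndex` is assumed to come from one of the vmaFindMemoryTypeIndex* calls:

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... set VmaAllocationCreateInfo::pool to allocate from it ...
    vmaDestroyPool(allocator, pool);
*/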
1529 void vmaDestroyPool(
1530  VmaAllocator allocator,
1531  VmaPool pool);
1532 
1539 void vmaGetPoolStats(
1540  VmaAllocator allocator,
1541  VmaPool pool,
1542  VmaPoolStats* pPoolStats);
1543 
1550 void vmaMakePoolAllocationsLost(
1551  VmaAllocator allocator,
1552  VmaPool pool,
1553  size_t* pLostAllocationCount);
1554 
1555 VK_DEFINE_HANDLE(VmaAllocation)
1556 
1557 
1559 typedef struct VmaAllocationInfo {
1564  uint32_t memoryType;
1573  VkDeviceMemory deviceMemory;
1578  VkDeviceSize offset;
1583  VkDeviceSize size;
1590  void* pMappedData;
1597  void* pUserData;
1598 } VmaAllocationInfo;
1599 
1610 VkResult vmaAllocateMemory(
1611  VmaAllocator allocator,
1612  const VkMemoryRequirements* pVkMemoryRequirements,
1613  const VmaAllocationCreateInfo* pCreateInfo,
1614  VmaAllocation* pAllocation,
1615  VmaAllocationInfo* pAllocationInfo);
1616 
1623 VkResult vmaAllocateMemoryForBuffer(
1624  VmaAllocator allocator,
1625  VkBuffer buffer,
1626  const VmaAllocationCreateInfo* pCreateInfo,
1627  VmaAllocation* pAllocation,
1628  VmaAllocationInfo* pAllocationInfo);
1629 
1631 VkResult vmaAllocateMemoryForImage(
1632  VmaAllocator allocator,
1633  VkImage image,
1634  const VmaAllocationCreateInfo* pCreateInfo,
1635  VmaAllocation* pAllocation,
1636  VmaAllocationInfo* pAllocationInfo);
1637 
1639 void vmaFreeMemory(
1640  VmaAllocator allocator,
1641  VmaAllocation allocation);
1642 
1659 void vmaGetAllocationInfo(
1660  VmaAllocator allocator,
1661  VmaAllocation allocation,
1662  VmaAllocationInfo* pAllocationInfo);
1663 
1678 VkBool32 vmaTouchAllocation(
1679  VmaAllocator allocator,
1680  VmaAllocation allocation);
1681 
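/* Illustrative sketch: the per-frame pattern for allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT - touch the allocation and recreate
the resource if it was lost:

    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // Allocation was lost: free it and recreate the resource.
    }
*/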
1695 void vmaSetAllocationUserData(
1696  VmaAllocator allocator,
1697  VmaAllocation allocation,
1698  void* pUserData);
1699 
1710 void vmaCreateLostAllocation(
1711  VmaAllocator allocator,
1712  VmaAllocation* pAllocation);
1713 
1748 VkResult vmaMapMemory(
1749  VmaAllocator allocator,
1750  VmaAllocation allocation,
1751  void** ppData);
1752 
1757 void vmaUnmapMemory(
1758  VmaAllocator allocator,
1759  VmaAllocation allocation);
1760 
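/* Illustrative sketch: mapping, writing, and unmapping host-visible memory.
Assumes `allocation` was made in a HOST_VISIBLE memory type and that `srcData`
and `srcDataSize` are application-side values:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/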
1762 typedef struct VmaDefragmentationInfo {
1767  VkDeviceSize maxBytesToMove;
1772  uint32_t maxAllocationsToMove;
1773 } VmaDefragmentationInfo;
1774 
1776 typedef struct VmaDefragmentationStats {
1778  VkDeviceSize bytesMoved;
1780  VkDeviceSize bytesFreed;
1782  uint32_t allocationsMoved;
1784  uint32_t deviceMemoryBlocksFreed;
1785 } VmaDefragmentationStats;
1786 
1869 VkResult vmaDefragment(
1870  VmaAllocator allocator,
1871  VmaAllocation* pAllocations,
1872  size_t allocationCount,
1873  VkBool32* pAllocationsChanged,
1874  const VmaDefragmentationInfo *pDefragmentationInfo,
1875  VmaDefragmentationStats* pDefragmentationStats);
1876 
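/* Illustrative sketch: defragmenting a set of host-visible allocations.
`allocations` and `allocationCount` are application-owned; buffers bound to
moved allocations must be recreated and rebound by the caller afterwards:

    std::vector<VkBool32> changed(allocationCount);
    VmaDefragmentationStats defragStats;
    VkResult res = vmaDefragment(
        allocator, allocations, allocationCount, changed.data(), NULL, &defragStats);
*/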
1903 VkResult vmaCreateBuffer(
1904  VmaAllocator allocator,
1905  const VkBufferCreateInfo* pBufferCreateInfo,
1906  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1907  VkBuffer* pBuffer,
1908  VmaAllocation* pAllocation,
1909  VmaAllocationInfo* pAllocationInfo);
1910 
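/* Illustrative sketch: the one-call path that creates a buffer, allocates
memory for it, and binds them together:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/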
1922 void vmaDestroyBuffer(
1923  VmaAllocator allocator,
1924  VkBuffer buffer,
1925  VmaAllocation allocation);
1926 
1928 VkResult vmaCreateImage(
1929  VmaAllocator allocator,
1930  const VkImageCreateInfo* pImageCreateInfo,
1931  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1932  VkImage* pImage,
1933  VmaAllocation* pAllocation,
1934  VmaAllocationInfo* pAllocationInfo);
1935 
1947 void vmaDestroyImage(
1948  VmaAllocator allocator,
1949  VkImage image,
1950  VmaAllocation allocation);
1951 
1952 #ifdef __cplusplus
1953 }
1954 #endif
1955 
1956 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1957 
1958 // For Visual Studio IntelliSense.
1959 #ifdef __INTELLISENSE__
1960 #define VMA_IMPLEMENTATION
1961 #endif
1962 
1963 #ifdef VMA_IMPLEMENTATION
1964 #undef VMA_IMPLEMENTATION
1965 
1966 #include <cstdint>
1967 #include <cstdlib>
1968 #include <cstring>
1969 #include <cstdio> // for snprintf in the stats-string helpers below
1970 /*******************************************************************************
1971 CONFIGURATION SECTION
1972 
1973 Define some of these macros before each #include of this header, or change them
1974 here, if you need behavior other than the default for your environment.
1975 */
1976 
1977 /*
1978 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1979 internally, like:
1980 
1981  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1982 
1983 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1984 VmaAllocatorCreateInfo::pVulkanFunctions.
1985 */
1986 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1987 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1988 #endif
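/* Illustrative sketch (not part of the original header): when
VMA_STATIC_VULKAN_FUNCTIONS is 0 (e.g. with a function loader such as volk),
the pointers can be supplied manually instead. `myLoadedVkAllocateMemory` is an
assumed application-side pointer:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
    // ... fill the remaining members of VmaVulkanFunctions the same way ...
    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
*/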
1989 
1990 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1991 //#define VMA_USE_STL_CONTAINERS 1
1992 
1993 /* Set this macro to 1 to make the library include and use STL containers:
1994 std::pair, std::vector, std::list, std::unordered_map.
1995 
1996 Set it to 0 or leave it undefined to make the library use its own implementation of
1997 the containers.
1998 */
1999 #if VMA_USE_STL_CONTAINERS
2000  #define VMA_USE_STL_VECTOR 1
2001  #define VMA_USE_STL_UNORDERED_MAP 1
2002  #define VMA_USE_STL_LIST 1
2003 #endif
2004 
2005 #if VMA_USE_STL_VECTOR
2006  #include <vector>
2007 #endif
2008 
2009 #if VMA_USE_STL_UNORDERED_MAP
2010  #include <unordered_map>
2011 #endif
2012 
2013 #if VMA_USE_STL_LIST
2014  #include <list>
2015 #endif
2016 
2017 /*
2018 The following headers are used in this CONFIGURATION section only, so feel free to
2019 remove them if they are not needed.
2020 */
2021 #include <cassert> // for assert
2022 #include <algorithm> // for min, max
2023 #include <mutex> // for std::mutex
2024 #include <atomic> // for std::atomic
2025 
2026 #if !defined(_WIN32) && !defined(__APPLE__)
2027  #include <malloc.h> // for aligned_alloc()
2028 #endif
2029 
2030 #ifndef VMA_NULL
2031  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2032  #define VMA_NULL nullptr
2033 #endif
2034 
2035 #if defined(__APPLE__) || defined(__ANDROID__)
2036 #include <cstdlib>
2037 void *aligned_alloc(size_t alignment, size_t size)
2038 {
2039  // alignment must be >= sizeof(void*)
2040  if(alignment < sizeof(void*))
2041  {
2042  alignment = sizeof(void*);
2043  }
2044 
2045  void *pointer;
2046  if(posix_memalign(&pointer, alignment, size) == 0)
2047  return pointer;
2048  return VMA_NULL;
2049 }
2050 #endif
2051 
2052 // Normal assert to check for programmer's errors, especially in Debug configuration.
2053 #ifndef VMA_ASSERT
2054  #ifdef _DEBUG
2055  #define VMA_ASSERT(expr) assert(expr)
2056  #else
2057  #define VMA_ASSERT(expr)
2058  #endif
2059 #endif
2060 
2061 // Assert that will be called very often, like inside data structures, e.g. operator[].
2062 // Making it non-empty can make the program slow.
2063 #ifndef VMA_HEAVY_ASSERT
2064  #ifdef _DEBUG
2065  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2066  #else
2067  #define VMA_HEAVY_ASSERT(expr)
2068  #endif
2069 #endif
2070 
2071 #ifndef VMA_ALIGN_OF
2072  #define VMA_ALIGN_OF(type) (__alignof(type))
2073 #endif
2074 
2075 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2076  #if defined(_WIN32)
2077  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2078  #else
2079  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2080  #endif
2081 #endif
2082 
2083 #ifndef VMA_SYSTEM_FREE
2084  #if defined(_WIN32)
2085  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2086  #else
2087  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2088  #endif
2089 #endif
2090 
2091 #ifndef VMA_MIN
2092  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2093 #endif
2094 
2095 #ifndef VMA_MAX
2096  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2097 #endif
2098 
2099 #ifndef VMA_SWAP
2100  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2101 #endif
2102 
2103 #ifndef VMA_SORT
2104  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2105 #endif
2106 
2107 #ifndef VMA_DEBUG_LOG
2108  #define VMA_DEBUG_LOG(format, ...)
2109  /*
2110  #define VMA_DEBUG_LOG(format, ...) do { \
2111  printf(format, __VA_ARGS__); \
2112  printf("\n"); \
2113  } while(false)
2114  */
2115 #endif
2116 
2117 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2118 #if VMA_STATS_STRING_ENABLED
2119  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2120  {
2121  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2122  }
2123  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2124  {
2125  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2126  }
2127  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2128  {
2129  snprintf(outStr, strLen, "%p", ptr);
2130  }
2131 #endif
2132 
2133 #ifndef VMA_MUTEX
2134  class VmaMutex
2135  {
2136  public:
2137  VmaMutex() { }
2138  ~VmaMutex() { }
2139  void Lock() { m_Mutex.lock(); }
2140  void Unlock() { m_Mutex.unlock(); }
2141  private:
2142  std::mutex m_Mutex;
2143  };
2144  #define VMA_MUTEX VmaMutex
2145 #endif
2146 
2147 /*
2148 If providing your own implementation, you need to implement a subset of std::atomic:
2149 
2150 - Constructor(uint32_t desired)
2151 - uint32_t load() const
2152 - void store(uint32_t desired)
2153 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2154 */
2155 #ifndef VMA_ATOMIC_UINT32
2156  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2157 #endif
2158 
2159 #ifndef VMA_BEST_FIT
2160 
2172  #define VMA_BEST_FIT (1)
2173 #endif
2174 
2175 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2176 
2180  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2181 #endif
2182 
2183 #ifndef VMA_DEBUG_ALIGNMENT
2184 
2188  #define VMA_DEBUG_ALIGNMENT (1)
2189 #endif
2190 
2191 #ifndef VMA_DEBUG_MARGIN
2192 
2196  #define VMA_DEBUG_MARGIN (0)
2197 #endif
2198 
2199 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2200 
2204  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2205 #endif
2206 
2207 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2208 
2212  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2213 #endif
2214 
2215 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2216  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2218 #endif
2219 
2220 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2221  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2223 #endif
2224 
2225 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2226 
2227 /*******************************************************************************
2228 END OF CONFIGURATION
2229 */
2230 
2231 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2232  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2233 
2234 // Returns number of bits set to 1 in (v).
2235 static inline uint32_t VmaCountBitsSet(uint32_t v)
2236 {
2237  uint32_t c = v - ((v >> 1) & 0x55555555);
2238  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2239  c = ((c >> 4) + c) & 0x0F0F0F0F;
2240  c = ((c >> 8) + c) & 0x00FF00FF;
2241  c = ((c >> 16) + c) & 0x0000FFFF;
2242  return c;
2243 }
2244 
2245 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
2246 // Use types like uint32_t, uint64_t as T.
2247 template <typename T>
2248 static inline T VmaAlignUp(T val, T align)
2249 {
2250  return (val + align - 1) / align * align;
2251 }
2252 
2253 // Division with mathematical rounding to the nearest integer.
2254 template <typename T>
2255 inline T VmaRoundDiv(T x, T y)
2256 {
2257  return (x + (y / (T)2)) / y;
2258 }
2259 
2260 #ifndef VMA_SORT
2261 
2262 template<typename Iterator, typename Compare>
2263 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2264 {
2265  Iterator centerValue = end; --centerValue;
2266  Iterator insertIndex = beg;
2267  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2268  {
2269  if(cmp(*memTypeIndex, *centerValue))
2270  {
2271  if(insertIndex != memTypeIndex)
2272  {
2273  VMA_SWAP(*memTypeIndex, *insertIndex);
2274  }
2275  ++insertIndex;
2276  }
2277  }
2278  if(insertIndex != centerValue)
2279  {
2280  VMA_SWAP(*insertIndex, *centerValue);
2281  }
2282  return insertIndex;
2283 }
2284 
2285 template<typename Iterator, typename Compare>
2286 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2287 {
2288  if(beg < end)
2289  {
2290  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2291  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2292  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2293  }
2294 }
2295 
2296 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2297 
2298 #endif // #ifndef VMA_SORT
2299 
2300 /*
2301 Returns true if two memory blocks occupy overlapping pages.
2302 ResourceA must be at a lower memory offset than ResourceB.
2303 
2304 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2305 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2306 */
2307 static inline bool VmaBlocksOnSamePage(
2308  VkDeviceSize resourceAOffset,
2309  VkDeviceSize resourceASize,
2310  VkDeviceSize resourceBOffset,
2311  VkDeviceSize pageSize)
2312 {
2313  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2314  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2315  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2316  VkDeviceSize resourceBStart = resourceBOffset;
2317  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2318  return resourceAEndPage == resourceBStartPage;
2319 }
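// Worked example: with pageSize = 4096, a resource ending at offset 4000
// (resourceAEndPage = 0) and one starting at offset 4100 (resourceBStartPage = 4096)
// land on different pages, so the function returns false; if resource B started
// at offset 4050 instead, both would round down to page 0 and it would return true.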
2320 
2321 enum VmaSuballocationType
2322 {
2323  VMA_SUBALLOCATION_TYPE_FREE = 0,
2324  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2325  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2326  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2327  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2328  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2329  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2330 };
2331 
2332 /*
2333 Returns true if given suballocation types could conflict and must respect
2334 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2335 or linear image and another one is optimal image. If type is unknown, behave
2336 conservatively.
2337 */
2338 static inline bool VmaIsBufferImageGranularityConflict(
2339  VmaSuballocationType suballocType1,
2340  VmaSuballocationType suballocType2)
2341 {
2342  if(suballocType1 > suballocType2)
2343  {
2344  VMA_SWAP(suballocType1, suballocType2);
2345  }
2346 
2347  switch(suballocType1)
2348  {
2349  case VMA_SUBALLOCATION_TYPE_FREE:
2350  return false;
2351  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2352  return true;
2353  case VMA_SUBALLOCATION_TYPE_BUFFER:
2354  return
2355  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2356  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2357  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2358  return
2359  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2360  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2361  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2362  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2363  return
2364  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2365  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2366  return false;
2367  default:
2368  VMA_ASSERT(0);
2369  return true;
2370  }
2371 }
2372 
2373 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2374 struct VmaMutexLock
2375 {
2376 public:
2377  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2378  m_pMutex(useMutex ? &mutex : VMA_NULL)
2379  {
2380  if(m_pMutex)
2381  {
2382  m_pMutex->Lock();
2383  }
2384  }
2385 
2386  ~VmaMutexLock()
2387  {
2388  if(m_pMutex)
2389  {
2390  m_pMutex->Unlock();
2391  }
2392  }
2393 
2394 private:
2395  VMA_MUTEX* m_pMutex;
2396 };
2397 
2398 #if VMA_DEBUG_GLOBAL_MUTEX
2399  static VMA_MUTEX gDebugGlobalMutex;
2400  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2401 #else
2402  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2403 #endif
2404 
2405 // Minimum size of a free suballocation to register it in the free suballocation collection.
2406 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2407 
2408 /*
2409 Performs binary search and returns iterator to the first element that is greater
2410 than or equal to (key), according to comparison (cmp).
2411 
2412 Cmp should return true if its first argument is less than its second argument.
2413 
2414 The returned iterator points to the found element if it is present in the
2415 collection, or to the place where a new element with value (key) should be inserted.
2416 */
2417 template <typename IterT, typename KeyT, typename CmpT>
2418 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2419 {
2420  size_t down = 0, up = (end - beg);
2421  while(down < up)
2422  {
2423  const size_t mid = (down + up) / 2;
2424  if(cmp(*(beg+mid), key))
2425  {
2426  down = mid + 1;
2427  }
2428  else
2429  {
2430  up = mid;
2431  }
2432  }
2433  return beg + down;
2434 }
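/* Illustrative sketch: lower-bound search over a sorted plain array:

    const uint32_t sorted[] = { 1, 3, 3, 7 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 4, 3u,
        [](uint32_t a, uint32_t b) { return a < b; });
    // it points to the first 3 (index 1).
*/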
2435 
2437 // Memory allocation
2438 
2439 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2440 {
2441  if((pAllocationCallbacks != VMA_NULL) &&
2442  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2443  {
2444  return (*pAllocationCallbacks->pfnAllocation)(
2445  pAllocationCallbacks->pUserData,
2446  size,
2447  alignment,
2448  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2449  }
2450  else
2451  {
2452  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2453  }
2454 }
2455 
2456 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2457 {
2458  if((pAllocationCallbacks != VMA_NULL) &&
2459  (pAllocationCallbacks->pfnFree != VMA_NULL))
2460  {
2461  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2462  }
2463  else
2464  {
2465  VMA_SYSTEM_FREE(ptr);
2466  }
2467 }
2468 
2469 template<typename T>
2470 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2471 {
2472  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2473 }
2474 
2475 template<typename T>
2476 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2477 {
2478  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2479 }
2480 
2481 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2482 
2483 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2484 
2485 template<typename T>
2486 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2487 {
2488  ptr->~T();
2489  VmaFree(pAllocationCallbacks, ptr);
2490 }
2491 
2492 template<typename T>
2493 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2494 {
2495  if(ptr != VMA_NULL)
2496  {
2497  for(size_t i = count; i--; )
2498  {
2499  ptr[i].~T();
2500  }
2501  VmaFree(pAllocationCallbacks, ptr);
2502  }
2503 }
2504 
2505 // STL-compatible allocator.
2506 template<typename T>
2507 class VmaStlAllocator
2508 {
2509 public:
2510  const VkAllocationCallbacks* const m_pCallbacks;
2511  typedef T value_type;
2512 
2513  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2514  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2515 
2516  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2517  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2518 
2519  template<typename U>
2520  bool operator==(const VmaStlAllocator<U>& rhs) const
2521  {
2522  return m_pCallbacks == rhs.m_pCallbacks;
2523  }
2524  template<typename U>
2525  bool operator!=(const VmaStlAllocator<U>& rhs) const
2526  {
2527  return m_pCallbacks != rhs.m_pCallbacks;
2528  }
2529 
2530  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2531 };
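/* Illustrative sketch: routing a standard container's storage through the
VkAllocationCallbacks chain. A null `callbacks` pointer falls back to the
VMA_SYSTEM_ALIGNED_MALLOC path:

    const VkAllocationCallbacks* callbacks = NULL;
    VmaStlAllocator<uint32_t> alloc(callbacks);
    std::vector<uint32_t, VmaStlAllocator<uint32_t> > v(alloc);
    v.push_back(42);
*/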
2532 
2533 #if VMA_USE_STL_VECTOR
2534 
2535 #define VmaVector std::vector
2536 
2537 template<typename T, typename allocatorT>
2538 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2539 {
2540  vec.insert(vec.begin() + index, item);
2541 }
2542 
2543 template<typename T, typename allocatorT>
2544 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2545 {
2546  vec.erase(vec.begin() + index);
2547 }
2548 
2549 #else // #if VMA_USE_STL_VECTOR
2550 
2551 /* Class with interface compatible with subset of std::vector.
2552 T must be POD because constructors and destructors are not called and memcpy is
2553 used for these objects. */
2554 template<typename T, typename AllocatorT>
2555 class VmaVector
2556 {
2557 public:
2558  typedef T value_type;
2559 
2560  VmaVector(const AllocatorT& allocator) :
2561  m_Allocator(allocator),
2562  m_pArray(VMA_NULL),
2563  m_Count(0),
2564  m_Capacity(0)
2565  {
2566  }
2567 
2568  VmaVector(size_t count, const AllocatorT& allocator) :
2569  m_Allocator(allocator),
2570  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2571  m_Count(count),
2572  m_Capacity(count)
2573  {
2574  }
2575 
2576  VmaVector(const VmaVector<T, AllocatorT>& src) :
2577  m_Allocator(src.m_Allocator),
2578  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2579  m_Count(src.m_Count),
2580  m_Capacity(src.m_Count)
2581  {
2582  if(m_Count != 0)
2583  {
2584  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2585  }
2586  }
2587 
2588  ~VmaVector()
2589  {
2590  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2591  }
2592 
2593  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2594  {
2595  if(&rhs != this)
2596  {
2597  resize(rhs.m_Count);
2598  if(m_Count != 0)
2599  {
2600  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2601  }
2602  }
2603  return *this;
2604  }
2605 
2606  bool empty() const { return m_Count == 0; }
2607  size_t size() const { return m_Count; }
2608  T* data() { return m_pArray; }
2609  const T* data() const { return m_pArray; }
2610 
2611  T& operator[](size_t index)
2612  {
2613  VMA_HEAVY_ASSERT(index < m_Count);
2614  return m_pArray[index];
2615  }
2616  const T& operator[](size_t index) const
2617  {
2618  VMA_HEAVY_ASSERT(index < m_Count);
2619  return m_pArray[index];
2620  }
2621 
2622  T& front()
2623  {
2624  VMA_HEAVY_ASSERT(m_Count > 0);
2625  return m_pArray[0];
2626  }
2627  const T& front() const
2628  {
2629  VMA_HEAVY_ASSERT(m_Count > 0);
2630  return m_pArray[0];
2631  }
2632  T& back()
2633  {
2634  VMA_HEAVY_ASSERT(m_Count > 0);
2635  return m_pArray[m_Count - 1];
2636  }
2637  const T& back() const
2638  {
2639  VMA_HEAVY_ASSERT(m_Count > 0);
2640  return m_pArray[m_Count - 1];
2641  }
2642 
2643  void reserve(size_t newCapacity, bool freeMemory = false)
2644  {
2645  newCapacity = VMA_MAX(newCapacity, m_Count);
2646 
2647  if((newCapacity < m_Capacity) && !freeMemory)
2648  {
2649  newCapacity = m_Capacity;
2650  }
2651 
2652  if(newCapacity != m_Capacity)
2653  {
2654  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2655  if(m_Count != 0)
2656  {
2657  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2658  }
2659  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2660  m_Capacity = newCapacity;
2661  m_pArray = newArray;
2662  }
2663  }
2664 
2665  void resize(size_t newCount, bool freeMemory = false)
2666  {
2667  size_t newCapacity = m_Capacity;
2668  if(newCount > m_Capacity)
2669  {
2670  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2671  }
2672  else if(freeMemory)
2673  {
2674  newCapacity = newCount;
2675  }
2676 
2677  if(newCapacity != m_Capacity)
2678  {
2679  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2680  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2681  if(elementsToCopy != 0)
2682  {
2683  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2684  }
2685  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2686  m_Capacity = newCapacity;
2687  m_pArray = newArray;
2688  }
2689 
2690  m_Count = newCount;
2691  }
2692 
2693  void clear(bool freeMemory = false)
2694  {
2695  resize(0, freeMemory);
2696  }
2697 
2698  void insert(size_t index, const T& src)
2699  {
2700  VMA_HEAVY_ASSERT(index <= m_Count);
2701  const size_t oldCount = size();
2702  resize(oldCount + 1);
2703  if(index < oldCount)
2704  {
2705  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2706  }
2707  m_pArray[index] = src;
2708  }
2709 
2710  void remove(size_t index)
2711  {
2712  VMA_HEAVY_ASSERT(index < m_Count);
2713  const size_t oldCount = size();
2714  if(index < oldCount - 1)
2715  {
2716  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2717  }
2718  resize(oldCount - 1);
2719  }
2720 
2721  void push_back(const T& src)
2722  {
2723  const size_t newIndex = size();
2724  resize(newIndex + 1);
2725  m_pArray[newIndex] = src;
2726  }
2727 
2728  void pop_back()
2729  {
2730  VMA_HEAVY_ASSERT(m_Count > 0);
2731  resize(size() - 1);
2732  }
2733 
2734  void push_front(const T& src)
2735  {
2736  insert(0, src);
2737  }
2738 
2739  void pop_front()
2740  {
2741  VMA_HEAVY_ASSERT(m_Count > 0);
2742  remove(0);
2743  }
2744 
2745  typedef T* iterator;
2746 
2747  iterator begin() { return m_pArray; }
2748  iterator end() { return m_pArray + m_Count; }
2749 
2750 private:
2751  AllocatorT m_Allocator;
2752  T* m_pArray;
2753  size_t m_Count;
2754  size_t m_Capacity;
2755 };
2756 
2757 template<typename T, typename allocatorT>
2758 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2759 {
2760  vec.insert(index, item);
2761 }
2762 
2763 template<typename T, typename allocatorT>
2764 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2765 {
2766  vec.remove(index);
2767 }
2768 
2769 #endif // #if VMA_USE_STL_VECTOR
2770 
2771 template<typename CmpLess, typename VectorT>
2772 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2773 {
2774  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2775  vector.data(),
2776  vector.data() + vector.size(),
2777  value,
2778  CmpLess()) - vector.data();
2779  VmaVectorInsert(vector, indexToInsert, value);
2780  return indexToInsert;
2781 }
2782 
2783 template<typename CmpLess, typename VectorT>
2784 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2785 {
2786  CmpLess comparator;
2787  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2788  vector.begin(),
2789  vector.end(),
2790  value,
2791  comparator);
2792  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2793  {
2794  size_t indexToRemove = it - vector.begin();
2795  VmaVectorRemove(vector, indexToRemove);
2796  return true;
2797  }
2798  return false;
2799 }
2800 
2801 template<typename CmpLess, typename VectorT>
2802 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2803 {
2804  CmpLess comparator;
2805  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2806  vector.data(),
2807  vector.data() + vector.size(),
2808  value,
2809  comparator);
2810  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
2811  {
2812  return it - vector.data();
2813  }
2814  else
2815  {
2816  return vector.size();
2817  }
2818 }
2819 
2821 // class VmaPoolAllocator
2822 
2823 /*
2824 Allocator for objects of type T using a list of arrays (pools) to speed up
2825 allocation. The number of elements that can be allocated is not bounded, because
2826 the allocator can create multiple blocks.
2827 */
2828 template<typename T>
2829 class VmaPoolAllocator
2830 {
2831 public:
2832  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2833  ~VmaPoolAllocator();
2834  void Clear();
2835  T* Alloc();
2836  void Free(T* ptr);
2837 
2838 private:
2839  union Item
2840  {
2841  uint32_t NextFreeIndex;
2842  T Value;
2843  };
2844 
2845  struct ItemBlock
2846  {
2847  Item* pItems;
2848  uint32_t FirstFreeIndex;
2849  };
2850 
2851  const VkAllocationCallbacks* m_pAllocationCallbacks;
2852  size_t m_ItemsPerBlock;
2853  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2854 
2855  ItemBlock& CreateNewBlock();
2856 };
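/* Illustrative sketch: pooled allocation of small trivially-destructible objects.
Null allocation callbacks fall back to the system aligned malloc:

    VmaPoolAllocator<uint64_t> pool(NULL, 128); // 128 items per block
    uint64_t* x = pool.Alloc();
    *x = 123;
    pool.Free(x);
*/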
2857 
2858 template<typename T>
2859 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2860  m_pAllocationCallbacks(pAllocationCallbacks),
2861  m_ItemsPerBlock(itemsPerBlock),
2862  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2863 {
2864  VMA_ASSERT(itemsPerBlock > 0);
2865 }
2866 
2867 template<typename T>
2868 VmaPoolAllocator<T>::~VmaPoolAllocator()
2869 {
2870  Clear();
2871 }
2872 
2873 template<typename T>
2874 void VmaPoolAllocator<T>::Clear()
2875 {
2876  for(size_t i = m_ItemBlocks.size(); i--; )
2877  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2878  m_ItemBlocks.clear();
2879 }
2880 
2881 template<typename T>
2882 T* VmaPoolAllocator<T>::Alloc()
2883 {
2884  for(size_t i = m_ItemBlocks.size(); i--; )
2885  {
2886  ItemBlock& block = m_ItemBlocks[i];
2887  // This block has some free items: Use first one.
2888  if(block.FirstFreeIndex != UINT32_MAX)
2889  {
2890  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2891  block.FirstFreeIndex = pItem->NextFreeIndex;
2892  return &pItem->Value;
2893  }
2894  }
2895 
2896  // No block has free item: Create new one and use it.
2897  ItemBlock& newBlock = CreateNewBlock();
2898  Item* const pItem = &newBlock.pItems[0];
2899  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2900  return &pItem->Value;
2901 }
2902 
2903 template<typename T>
2904 void VmaPoolAllocator<T>::Free(T* ptr)
2905 {
2906  // Search all memory blocks to find ptr.
2907  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2908  {
2909  ItemBlock& block = m_ItemBlocks[i];
2910 
2911  // Casting to union.
2912  Item* pItemPtr;
2913  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2914 
2915  // Check if pItemPtr is in address range of this block.
2916  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2917  {
2918  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2919  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2920  block.FirstFreeIndex = index;
2921  return;
2922  }
2923  }
2924  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2925 }
2926 
2927 template<typename T>
2928 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2929 {
2930  ItemBlock newBlock = {
2931  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2932 
2933  m_ItemBlocks.push_back(newBlock);
2934 
2935  // Setup singly-linked list of all free items in this block.
2936  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2937  newBlock.pItems[i].NextFreeIndex = i + 1;
2938  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2939  return m_ItemBlocks.back();
2940 }
2941 
2943 // class VmaRawList, VmaList
2944 
2945 #if VMA_USE_STL_LIST
2946 
2947 #define VmaList std::list
2948 
2949 #else // #if VMA_USE_STL_LIST
2950 
2951 template<typename T>
2952 struct VmaListItem
2953 {
2954  VmaListItem* pPrev;
2955  VmaListItem* pNext;
2956  T Value;
2957 };
2958 
2959 // Doubly linked list.
2960 template<typename T>
2961 class VmaRawList
2962 {
2963 public:
2964  typedef VmaListItem<T> ItemType;
2965 
2966  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2967  ~VmaRawList();
2968  void Clear();
2969 
2970  size_t GetCount() const { return m_Count; }
2971  bool IsEmpty() const { return m_Count == 0; }
2972 
2973  ItemType* Front() { return m_pFront; }
2974  const ItemType* Front() const { return m_pFront; }
2975  ItemType* Back() { return m_pBack; }
2976  const ItemType* Back() const { return m_pBack; }
2977 
2978  ItemType* PushBack();
2979  ItemType* PushFront();
2980  ItemType* PushBack(const T& value);
2981  ItemType* PushFront(const T& value);
2982  void PopBack();
2983  void PopFront();
2984 
2985  // Item can be null - it means PushBack.
2986  ItemType* InsertBefore(ItemType* pItem);
2987  // Item can be null - it means PushFront.
2988  ItemType* InsertAfter(ItemType* pItem);
2989 
2990  ItemType* InsertBefore(ItemType* pItem, const T& value);
2991  ItemType* InsertAfter(ItemType* pItem, const T& value);
2992 
2993  void Remove(ItemType* pItem);
2994 
2995 private:
2996  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2997  VmaPoolAllocator<ItemType> m_ItemAllocator;
2998  ItemType* m_pFront;
2999  ItemType* m_pBack;
3000  size_t m_Count;
3001 
3002  // Declared but not defined, to block the copy constructor and assignment operator.
3003  VmaRawList(const VmaRawList<T>& src);
3004  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
3005 };
3006 
3007 template<typename T>
3008 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3009  m_pAllocationCallbacks(pAllocationCallbacks),
3010  m_ItemAllocator(pAllocationCallbacks, 128),
3011  m_pFront(VMA_NULL),
3012  m_pBack(VMA_NULL),
3013  m_Count(0)
3014 {
3015 }
3016 
3017 template<typename T>
3018 VmaRawList<T>::~VmaRawList()
3019 {
3020  // Intentionally not calling Clear, because that would be unnecessary
3021  // computation to return all items to m_ItemAllocator as free.
3022 }
3023 
3024 template<typename T>
3025 void VmaRawList<T>::Clear()
3026 {
3027  if(IsEmpty() == false)
3028  {
3029  ItemType* pItem = m_pBack;
3030  while(pItem != VMA_NULL)
3031  {
3032  ItemType* const pPrevItem = pItem->pPrev;
3033  m_ItemAllocator.Free(pItem);
3034  pItem = pPrevItem;
3035  }
3036  m_pFront = VMA_NULL;
3037  m_pBack = VMA_NULL;
3038  m_Count = 0;
3039  }
3040 }
3041 
3042 template<typename T>
3043 VmaListItem<T>* VmaRawList<T>::PushBack()
3044 {
3045  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3046  pNewItem->pNext = VMA_NULL;
3047  if(IsEmpty())
3048  {
3049  pNewItem->pPrev = VMA_NULL;
3050  m_pFront = pNewItem;
3051  m_pBack = pNewItem;
3052  m_Count = 1;
3053  }
3054  else
3055  {
3056  pNewItem->pPrev = m_pBack;
3057  m_pBack->pNext = pNewItem;
3058  m_pBack = pNewItem;
3059  ++m_Count;
3060  }
3061  return pNewItem;
3062 }
3063 
3064 template<typename T>
3065 VmaListItem<T>* VmaRawList<T>::PushFront()
3066 {
3067  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3068  pNewItem->pPrev = VMA_NULL;
3069  if(IsEmpty())
3070  {
3071  pNewItem->pNext = VMA_NULL;
3072  m_pFront = pNewItem;
3073  m_pBack = pNewItem;
3074  m_Count = 1;
3075  }
3076  else
3077  {
3078  pNewItem->pNext = m_pFront;
3079  m_pFront->pPrev = pNewItem;
3080  m_pFront = pNewItem;
3081  ++m_Count;
3082  }
3083  return pNewItem;
3084 }
3085 
3086 template<typename T>
3087 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3088 {
3089  ItemType* const pNewItem = PushBack();
3090  pNewItem->Value = value;
3091  return pNewItem;
3092 }
3093 
3094 template<typename T>
3095 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3096 {
3097  ItemType* const pNewItem = PushFront();
3098  pNewItem->Value = value;
3099  return pNewItem;
3100 }
3101 
3102 template<typename T>
3103 void VmaRawList<T>::PopBack()
3104 {
3105  VMA_HEAVY_ASSERT(m_Count > 0);
3106  ItemType* const pBackItem = m_pBack;
3107  ItemType* const pPrevItem = pBackItem->pPrev;
3108  if(pPrevItem != VMA_NULL)
3109  {
3110  pPrevItem->pNext = VMA_NULL;
3111  }
3112  m_pBack = pPrevItem;
3113  m_ItemAllocator.Free(pBackItem);
3114  --m_Count;
3115 }
3116 
3117 template<typename T>
3118 void VmaRawList<T>::PopFront()
3119 {
3120  VMA_HEAVY_ASSERT(m_Count > 0);
3121  ItemType* const pFrontItem = m_pFront;
3122  ItemType* const pNextItem = pFrontItem->pNext;
3123  if(pNextItem != VMA_NULL)
3124  {
3125  pNextItem->pPrev = VMA_NULL;
3126  }
3127  m_pFront = pNextItem;
3128  m_ItemAllocator.Free(pFrontItem);
3129  --m_Count;
3130 }
3131 
3132 template<typename T>
3133 void VmaRawList<T>::Remove(ItemType* pItem)
3134 {
3135  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3136  VMA_HEAVY_ASSERT(m_Count > 0);
3137 
3138  if(pItem->pPrev != VMA_NULL)
3139  {
3140  pItem->pPrev->pNext = pItem->pNext;
3141  }
3142  else
3143  {
3144  VMA_HEAVY_ASSERT(m_pFront == pItem);
3145  m_pFront = pItem->pNext;
3146  }
3147 
3148  if(pItem->pNext != VMA_NULL)
3149  {
3150  pItem->pNext->pPrev = pItem->pPrev;
3151  }
3152  else
3153  {
3154  VMA_HEAVY_ASSERT(m_pBack == pItem);
3155  m_pBack = pItem->pPrev;
3156  }
3157 
3158  m_ItemAllocator.Free(pItem);
3159  --m_Count;
3160 }
3161 
3162 template<typename T>
3163 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3164 {
3165  if(pItem != VMA_NULL)
3166  {
3167  ItemType* const prevItem = pItem->pPrev;
3168  ItemType* const newItem = m_ItemAllocator.Alloc();
3169  newItem->pPrev = prevItem;
3170  newItem->pNext = pItem;
3171  pItem->pPrev = newItem;
3172  if(prevItem != VMA_NULL)
3173  {
3174  prevItem->pNext = newItem;
3175  }
3176  else
3177  {
3178  VMA_HEAVY_ASSERT(m_pFront == pItem);
3179  m_pFront = newItem;
3180  }
3181  ++m_Count;
3182  return newItem;
3183  }
3184  else
3185  return PushBack();
3186 }
3187 
3188 template<typename T>
3189 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3190 {
3191  if(pItem != VMA_NULL)
3192  {
3193  ItemType* const nextItem = pItem->pNext;
3194  ItemType* const newItem = m_ItemAllocator.Alloc();
3195  newItem->pNext = nextItem;
3196  newItem->pPrev = pItem;
3197  pItem->pNext = newItem;
3198  if(nextItem != VMA_NULL)
3199  {
3200  nextItem->pPrev = newItem;
3201  }
3202  else
3203  {
3204  VMA_HEAVY_ASSERT(m_pBack == pItem);
3205  m_pBack = newItem;
3206  }
3207  ++m_Count;
3208  return newItem;
3209  }
3210  else
3211  return PushFront();
3212 }
3213 
3214 template<typename T>
3215 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3216 {
3217  ItemType* const newItem = InsertBefore(pItem);
3218  newItem->Value = value;
3219  return newItem;
3220 }
3221 
3222 template<typename T>
3223 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3224 {
3225  ItemType* const newItem = InsertAfter(pItem);
3226  newItem->Value = value;
3227  return newItem;
3228 }
3229 
3230 template<typename T, typename AllocatorT>
3231 class VmaList
3232 {
3233 public:
3234  class iterator
3235  {
3236  public:
3237  iterator() :
3238  m_pList(VMA_NULL),
3239  m_pItem(VMA_NULL)
3240  {
3241  }
3242 
3243  T& operator*() const
3244  {
3245  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3246  return m_pItem->Value;
3247  }
3248  T* operator->() const
3249  {
3250  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3251  return &m_pItem->Value;
3252  }
3253 
3254  iterator& operator++()
3255  {
3256  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3257  m_pItem = m_pItem->pNext;
3258  return *this;
3259  }
3260  iterator& operator--()
3261  {
3262  if(m_pItem != VMA_NULL)
3263  {
3264  m_pItem = m_pItem->pPrev;
3265  }
3266  else
3267  {
3268  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3269  m_pItem = m_pList->Back();
3270  }
3271  return *this;
3272  }
3273 
3274  iterator operator++(int)
3275  {
3276  iterator result = *this;
3277  ++*this;
3278  return result;
3279  }
3280  iterator operator--(int)
3281  {
3282  iterator result = *this;
3283  --*this;
3284  return result;
3285  }
3286 
3287  bool operator==(const iterator& rhs) const
3288  {
3289  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3290  return m_pItem == rhs.m_pItem;
3291  }
3292  bool operator!=(const iterator& rhs) const
3293  {
3294  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3295  return m_pItem != rhs.m_pItem;
3296  }
3297 
3298  private:
3299  VmaRawList<T>* m_pList;
3300  VmaListItem<T>* m_pItem;
3301 
3302  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3303  m_pList(pList),
3304  m_pItem(pItem)
3305  {
3306  }
3307 
3308  friend class VmaList<T, AllocatorT>;
3309  };
3310 
3311  class const_iterator
3312  {
3313  public:
3314  const_iterator() :
3315  m_pList(VMA_NULL),
3316  m_pItem(VMA_NULL)
3317  {
3318  }
3319 
3320  const_iterator(const iterator& src) :
3321  m_pList(src.m_pList),
3322  m_pItem(src.m_pItem)
3323  {
3324  }
3325 
3326  const T& operator*() const
3327  {
3328  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3329  return m_pItem->Value;
3330  }
3331  const T* operator->() const
3332  {
3333  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3334  return &m_pItem->Value;
3335  }
3336 
3337  const_iterator& operator++()
3338  {
3339  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3340  m_pItem = m_pItem->pNext;
3341  return *this;
3342  }
3343  const_iterator& operator--()
3344  {
3345  if(m_pItem != VMA_NULL)
3346  {
3347  m_pItem = m_pItem->pPrev;
3348  }
3349  else
3350  {
3351  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3352  m_pItem = m_pList->Back();
3353  }
3354  return *this;
3355  }
3356 
3357  const_iterator operator++(int)
3358  {
3359  const_iterator result = *this;
3360  ++*this;
3361  return result;
3362  }
3363  const_iterator operator--(int)
3364  {
3365  const_iterator result = *this;
3366  --*this;
3367  return result;
3368  }
3369 
3370  bool operator==(const const_iterator& rhs) const
3371  {
3372  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3373  return m_pItem == rhs.m_pItem;
3374  }
3375  bool operator!=(const const_iterator& rhs) const
3376  {
3377  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3378  return m_pItem != rhs.m_pItem;
3379  }
3380 
3381  private:
3382  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3383  m_pList(pList),
3384  m_pItem(pItem)
3385  {
3386  }
3387 
3388  const VmaRawList<T>* m_pList;
3389  const VmaListItem<T>* m_pItem;
3390 
3391  friend class VmaList<T, AllocatorT>;
3392  };
3393 
3394  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3395 
3396  bool empty() const { return m_RawList.IsEmpty(); }
3397  size_t size() const { return m_RawList.GetCount(); }
3398 
3399  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3400  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3401 
3402  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3403  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3404 
3405  void clear() { m_RawList.Clear(); }
3406  void push_back(const T& value) { m_RawList.PushBack(value); }
3407  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3408  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3409 
3410 private:
3411  VmaRawList<T> m_RawList;
3412 };
3413 
3414 #endif // #if VMA_USE_STL_LIST
3415 
3417 // class VmaMap
3418 
3419 // Unused in this version.
3420 #if 0
3421 
3422 #if VMA_USE_STL_UNORDERED_MAP
3423 
3424 #define VmaPair std::pair
3425 
3426 #define VMA_MAP_TYPE(KeyT, ValueT) \
3427  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3428 
3429 #else // #if VMA_USE_STL_UNORDERED_MAP
3430 
3431 template<typename T1, typename T2>
3432 struct VmaPair
3433 {
3434  T1 first;
3435  T2 second;
3436 
3437  VmaPair() : first(), second() { }
3438  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3439 };
3440 
3441 /* Class compatible with subset of interface of std::unordered_map.
3442 KeyT, ValueT must be POD because they will be stored in VmaVector.
3443 */
3444 template<typename KeyT, typename ValueT>
3445 class VmaMap
3446 {
3447 public:
3448  typedef VmaPair<KeyT, ValueT> PairType;
3449  typedef PairType* iterator;
3450 
3451  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3452 
3453  iterator begin() { return m_Vector.begin(); }
3454  iterator end() { return m_Vector.end(); }
3455 
3456  void insert(const PairType& pair);
3457  iterator find(const KeyT& key);
3458  void erase(iterator it);
3459 
3460 private:
3461  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3462 };
3463 
3464 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3465 
3466 template<typename FirstT, typename SecondT>
3467 struct VmaPairFirstLess
3468 {
3469  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3470  {
3471  return lhs.first < rhs.first;
3472  }
3473  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3474  {
3475  return lhs.first < rhsFirst;
3476  }
3477 };
3478 
3479 template<typename KeyT, typename ValueT>
3480 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3481 {
3482  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3483  m_Vector.data(),
3484  m_Vector.data() + m_Vector.size(),
3485  pair,
3486  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3487  VmaVectorInsert(m_Vector, indexToInsert, pair);
3488 }
3489 
3490 template<typename KeyT, typename ValueT>
3491 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3492 {
3493  PairType* it = VmaBinaryFindFirstNotLess(
3494  m_Vector.data(),
3495  m_Vector.data() + m_Vector.size(),
3496  key,
3497  VmaPairFirstLess<KeyT, ValueT>());
3498  if((it != m_Vector.end()) && (it->first == key))
3499  {
3500  return it;
3501  }
3502  else
3503  {
3504  return m_Vector.end();
3505  }
3506 }
3507 
3508 template<typename KeyT, typename ValueT>
3509 void VmaMap<KeyT, ValueT>::erase(iterator it)
3510 {
3511  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3512 }
3513 
3514 #endif // #if VMA_USE_STL_UNORDERED_MAP
3515 
3516 #endif // #if 0
3517 
3519 
3520 class VmaDeviceMemoryBlock;
3521 
3522 struct VmaAllocation_T
3523 {
3524 private:
3525  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3526 
3527  enum FLAGS
3528  {
3529  FLAG_USER_DATA_STRING = 0x01,
3530  };
3531 
3532 public:
3533  enum ALLOCATION_TYPE
3534  {
3535  ALLOCATION_TYPE_NONE,
3536  ALLOCATION_TYPE_BLOCK,
3537  ALLOCATION_TYPE_DEDICATED,
3538  };
3539 
3540  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3541  m_Alignment(1),
3542  m_Size(0),
3543  m_pUserData(VMA_NULL),
3544  m_LastUseFrameIndex(currentFrameIndex),
3545  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3546  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3547  m_MapCount(0),
3548  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3549  {
3550  }
3551 
3552  ~VmaAllocation_T()
3553  {
3554  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3555 
3556  // Check if owned string was freed.
3557  VMA_ASSERT(m_pUserData == VMA_NULL);
3558  }
3559 
3560  void InitBlockAllocation(
3561  VmaPool hPool,
3562  VmaDeviceMemoryBlock* block,
3563  VkDeviceSize offset,
3564  VkDeviceSize alignment,
3565  VkDeviceSize size,
3566  VmaSuballocationType suballocationType,
3567  bool mapped,
3568  bool canBecomeLost)
3569  {
3570  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3571  VMA_ASSERT(block != VMA_NULL);
3572  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3573  m_Alignment = alignment;
3574  m_Size = size;
3575  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3576  m_SuballocationType = (uint8_t)suballocationType;
3577  m_BlockAllocation.m_hPool = hPool;
3578  m_BlockAllocation.m_Block = block;
3579  m_BlockAllocation.m_Offset = offset;
3580  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3581  }
3582 
3583  void InitLost()
3584  {
3585  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3586  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3587  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3588  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3589  m_BlockAllocation.m_Block = VMA_NULL;
3590  m_BlockAllocation.m_Offset = 0;
3591  m_BlockAllocation.m_CanBecomeLost = true;
3592  }
3593 
3594  void ChangeBlockAllocation(
3595  VmaAllocator hAllocator,
3596  VmaDeviceMemoryBlock* block,
3597  VkDeviceSize offset);
3598 
3599  // pMappedData not null means allocation is created with MAPPED flag.
3600  void InitDedicatedAllocation(
3601  uint32_t memoryTypeIndex,
3602  VkDeviceMemory hMemory,
3603  VmaSuballocationType suballocationType,
3604  void* pMappedData,
3605  VkDeviceSize size)
3606  {
3607  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3608  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3609  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3610  m_Alignment = 0;
3611  m_Size = size;
3612  m_SuballocationType = (uint8_t)suballocationType;
3613  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3614  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3615  m_DedicatedAllocation.m_hMemory = hMemory;
3616  m_DedicatedAllocation.m_pMappedData = pMappedData;
3617  }
3618 
3619  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3620  VkDeviceSize GetAlignment() const { return m_Alignment; }
3621  VkDeviceSize GetSize() const { return m_Size; }
3622  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3623  void* GetUserData() const { return m_pUserData; }
3624  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3625  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3626 
3627  VmaDeviceMemoryBlock* GetBlock() const
3628  {
3629  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3630  return m_BlockAllocation.m_Block;
3631  }
3632  VkDeviceSize GetOffset() const;
3633  VkDeviceMemory GetMemory() const;
3634  uint32_t GetMemoryTypeIndex() const;
3635  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3636  void* GetMappedData() const;
3637  bool CanBecomeLost() const;
3638  VmaPool GetPool() const;
3639 
3640  uint32_t GetLastUseFrameIndex() const
3641  {
3642  return m_LastUseFrameIndex.load();
3643  }
3644  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3645  {
3646  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3647  }
3648  /*
3649  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3650  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3651  - Else, returns false.
3652 
3653  If hAllocation is already lost, assert - you should not call it then.
3654  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3655  */
3656  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3657 
3658  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3659  {
3660  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3661  outInfo.blockCount = 1;
3662  outInfo.allocationCount = 1;
3663  outInfo.unusedRangeCount = 0;
3664  outInfo.usedBytes = m_Size;
3665  outInfo.unusedBytes = 0;
3666  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3667  outInfo.unusedRangeSizeMin = UINT64_MAX;
3668  outInfo.unusedRangeSizeMax = 0;
3669  }
3670 
3671  void BlockAllocMap();
3672  void BlockAllocUnmap();
3673  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3674  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3675 
3676 private:
3677  VkDeviceSize m_Alignment;
3678  VkDeviceSize m_Size;
3679  void* m_pUserData;
3680  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3681  uint8_t m_Type; // ALLOCATION_TYPE
3682  uint8_t m_SuballocationType; // VmaSuballocationType
3683  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3684  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory(); a decoding sketch follows this class.
3685  uint8_t m_MapCount;
3686  uint8_t m_Flags; // enum FLAGS
3687 
3688  // Allocation out of VmaDeviceMemoryBlock.
3689  struct BlockAllocation
3690  {
3691  VmaPool m_hPool; // Null if belongs to general memory.
3692  VmaDeviceMemoryBlock* m_Block;
3693  VkDeviceSize m_Offset;
3694  bool m_CanBecomeLost;
3695  };
3696 
3697  // Allocation for an object that has its own private VkDeviceMemory.
3698  struct DedicatedAllocation
3699  {
3700  uint32_t m_MemoryTypeIndex;
3701  VkDeviceMemory m_hMemory;
3702  void* m_pMappedData; // Not null means memory is mapped.
3703  };
3704 
3705  union
3706  {
3707  // Allocation out of VmaDeviceMemoryBlock.
3708  BlockAllocation m_BlockAllocation;
3709  // Allocation for an object that has its own private VkDeviceMemory.
3710  DedicatedAllocation m_DedicatedAllocation;
3711  };
3712 
3713  void FreeUserDataString(VmaAllocator hAllocator);
3714 };
3715 
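// A minimal sketch, not part of the library, of how the m_MapCount bit
// layout documented inside VmaAllocation_T above can be decoded. It assumes
// only the 0x80 flag / 0x7F counter split described in those comments.
#if 0
static void DecodeMapCount(uint8_t mapCount)
{
    // Set when the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    const bool persistentlyMapped = (mapCount & 0x80) != 0;
    // Number of vmaMapMemory() calls not yet paired with vmaUnmapMemory().
    const uint32_t userMapRefCount = mapCount & 0x7F;
    (void)persistentlyMapped;
    (void)userMapRefCount;
}
#endif
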
3716 /*
3717 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned
3718 as an allocated memory block, or free.
3719 */
3720 struct VmaSuballocation
3721 {
3722  VkDeviceSize offset;
3723  VkDeviceSize size;
3724  VmaAllocation hAllocation;
3725  VmaSuballocationType type;
3726 };
3727 
3728 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3729 
3730 // Cost of making one additional allocation lost, expressed in equivalent bytes.
3731 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3732 
3733 /*
3734 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3735 
3736 If canMakeOtherLost was false:
3737 - item points to a FREE suballocation.
3738 - itemsToMakeLostCount is 0.
3739 
3740 If canMakeOtherLost was true:
3741 - item points to first of sequence of suballocations, which are either FREE,
3742  or point to VmaAllocations that can become lost.
3743 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3744  the requested allocation to succeed.
3745 */
3746 struct VmaAllocationRequest
3747 {
3748  VkDeviceSize offset;
3749  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3750  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3751  VmaSuballocationList::iterator item;
3752  size_t itemsToMakeLostCount;
3753 
3754  VkDeviceSize CalcCost() const
3755  {
3756  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3757  }
3758 };
3759 
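// Worked example of the cost model above, with illustrative numbers: a
// candidate request that would make one 256 KiB allocation lost costs
// CalcCost() = 262144 + 1 * VMA_LOST_ALLOCATION_COST = 1310720 "bytes",
// while a candidate that loses nothing costs only its sumItemSize of 0,
// so the search always prefers requests that destroy fewer allocations.
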
3760 /*
3761 Data structure used for bookkeeping of allocations and unused ranges of memory
3762 in a single VkDeviceMemory block.
3763 */
3764 class VmaBlockMetadata
3765 {
3766 public:
3767  VmaBlockMetadata(VmaAllocator hAllocator);
3768  ~VmaBlockMetadata();
3769  void Init(VkDeviceSize size);
3770 
3771  // Validates all data structures inside this object. If not valid, returns false.
3772  bool Validate() const;
3773  VkDeviceSize GetSize() const { return m_Size; }
3774  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3775  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3776  VkDeviceSize GetUnusedRangeSizeMax() const;
3777  // Returns true if this block is empty - contains only a single free suballocation.
3778  bool IsEmpty() const;
3779 
3780  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3781  void AddPoolStats(VmaPoolStats& inoutStats) const;
3782 
3783 #if VMA_STATS_STRING_ENABLED
3784  void PrintDetailedMap(class VmaJsonWriter& json) const;
3785 #endif
3786 
3787  // Creates a trivial request for the case when the block is empty.
3788  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3789 
3790  // Tries to find a place for suballocation with given parameters inside this block.
3791  // If succeeded, fills pAllocationRequest and returns true.
3792  // If failed, returns false.
3793  bool CreateAllocationRequest(
3794  uint32_t currentFrameIndex,
3795  uint32_t frameInUseCount,
3796  VkDeviceSize bufferImageGranularity,
3797  VkDeviceSize allocSize,
3798  VkDeviceSize allocAlignment,
3799  VmaSuballocationType allocType,
3800  bool canMakeOtherLost,
3801  VmaAllocationRequest* pAllocationRequest);
3802 
3803  bool MakeRequestedAllocationsLost(
3804  uint32_t currentFrameIndex,
3805  uint32_t frameInUseCount,
3806  VmaAllocationRequest* pAllocationRequest);
3807 
3808  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3809 
3810  // Makes actual allocation based on request. Request must already be checked and valid.
3811  void Alloc(
3812  const VmaAllocationRequest& request,
3813  VmaSuballocationType type,
3814  VkDeviceSize allocSize,
3815  VmaAllocation hAllocation);
3816 
3817  // Frees suballocation assigned to given memory region.
3818  void Free(const VmaAllocation allocation);
3819  void FreeAtOffset(VkDeviceSize offset);
3820 
3821 private:
3822  VkDeviceSize m_Size;
3823  uint32_t m_FreeCount;
3824  VkDeviceSize m_SumFreeSize;
3825  VmaSuballocationList m_Suballocations;
3826  // Suballocations that are free and have size greater than a certain threshold.
3827  // Sorted by size, ascending.
3828  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3829 
3830  bool ValidateFreeSuballocationList() const;
3831 
3832  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
3833  // If yes, fills pOffset and returns true. If no, returns false.
3834  bool CheckAllocation(
3835  uint32_t currentFrameIndex,
3836  uint32_t frameInUseCount,
3837  VkDeviceSize bufferImageGranularity,
3838  VkDeviceSize allocSize,
3839  VkDeviceSize allocAlignment,
3840  VmaSuballocationType allocType,
3841  VmaSuballocationList::const_iterator suballocItem,
3842  bool canMakeOtherLost,
3843  VkDeviceSize* pOffset,
3844  size_t* itemsToMakeLostCount,
3845  VkDeviceSize* pSumFreeSize,
3846  VkDeviceSize* pSumItemSize) const;
3847  // Given a free suballocation, merges it with the following one, which must also be free.
3848  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3849  // Releases given suballocation, making it free.
3850  // Merges it with adjacent free suballocations if applicable.
3851  // Returns iterator to new free suballocation at this place.
3852  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3853  // Given a free suballocation, inserts it into the sorted list
3854  // m_FreeSuballocationsBySize if it is suitable (large enough to register).
3855  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3856  // Given a free suballocation, removes it from the sorted list
3857  // m_FreeSuballocationsBySize if it was registered there.
3858  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3859 };
3860 
3861 // Helper class that represents mapped memory. Synchronized internally.
3862 class VmaDeviceMemoryMapping
3863 {
3864 public:
3865  VmaDeviceMemoryMapping();
3866  ~VmaDeviceMemoryMapping();
3867 
3868  void* GetMappedData() const { return m_pMappedData; }
3869 
3870  // ppData can be null.
3871  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3872  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3873 
3874 private:
3875  VMA_MUTEX m_Mutex;
3876  uint32_t m_MapCount;
3877  void* m_pMappedData;
3878 };
3879 
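// A minimal usage sketch, not part of the library, of the reference-counted
// mapping above; hAllocator and hMemory are assumed to be valid handles.
// Map() calls vkMapMemory() only on the 0 -> 1 transition of the internal
// counter, and Unmap() calls vkUnmapMemory() only when it drops back to 0,
// so nested Map/Unmap pairs from multiple threads stay cheap and balanced.
#if 0
static void MappingUsageSketch(VmaAllocator hAllocator, VkDeviceMemory hMemory)
{
    VmaDeviceMemoryMapping mapping;
    void* pData = VMA_NULL;
    if(mapping.Map(hAllocator, hMemory, 1, &pData) == VK_SUCCESS)
    {
        // ... read or write through pData ...
        mapping.Unmap(hAllocator, hMemory, 1); // Final Unmap unmaps the memory.
    }
}
#endif
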
3880 /*
3881 Represents a single block of device memory (`VkDeviceMemory`) with all the
3882 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3883 
3884 Thread-safety: This class must be externally synchronized.
3885 */
3886 class VmaDeviceMemoryBlock
3887 {
3888 public:
3889  uint32_t m_MemoryTypeIndex;
3890  VkDeviceMemory m_hMemory;
3891  VmaDeviceMemoryMapping m_Mapping;
3892  VmaBlockMetadata m_Metadata;
3893 
3894  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3895 
3896  ~VmaDeviceMemoryBlock()
3897  {
3898  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3899  }
3900 
3901  // Always call after construction.
3902  void Init(
3903  uint32_t newMemoryTypeIndex,
3904  VkDeviceMemory newMemory,
3905  VkDeviceSize newSize);
3906  // Always call before destruction.
3907  void Destroy(VmaAllocator allocator);
3908 
3909  // Validates all data structures inside this object. If not valid, returns false.
3910  bool Validate() const;
3911 
3912  // ppData can be null.
3913  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3914  void Unmap(VmaAllocator hAllocator, uint32_t count);
3915 };
3916 
3917 struct VmaPointerLess
3918 {
3919  bool operator()(const void* lhs, const void* rhs) const
3920  {
3921  return lhs < rhs;
3922  }
3923 };
3924 
3925 class VmaDefragmentator;
3926 
3927 /*
3928 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3929 Vulkan memory type.
3930 
3931 Synchronized internally with a mutex.
3932 */
3933 struct VmaBlockVector
3934 {
3935  VmaBlockVector(
3936  VmaAllocator hAllocator,
3937  uint32_t memoryTypeIndex,
3938  VkDeviceSize preferredBlockSize,
3939  size_t minBlockCount,
3940  size_t maxBlockCount,
3941  VkDeviceSize bufferImageGranularity,
3942  uint32_t frameInUseCount,
3943  bool isCustomPool);
3944  ~VmaBlockVector();
3945 
3946  VkResult CreateMinBlocks();
3947 
3948  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3949  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3950  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3951  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3952 
3953  void GetPoolStats(VmaPoolStats* pStats);
3954 
3955  bool IsEmpty() const { return m_Blocks.empty(); }
3956 
3957  VkResult Allocate(
3958  VmaPool hCurrentPool,
3959  uint32_t currentFrameIndex,
3960  const VkMemoryRequirements& vkMemReq,
3961  const VmaAllocationCreateInfo& createInfo,
3962  VmaSuballocationType suballocType,
3963  VmaAllocation* pAllocation);
3964 
3965  void Free(
3966  VmaAllocation hAllocation);
3967 
3968  // Adds statistics of this BlockVector to pStats.
3969  void AddStats(VmaStats* pStats);
3970 
3971 #if VMA_STATS_STRING_ENABLED
3972  void PrintDetailedMap(class VmaJsonWriter& json);
3973 #endif
3974 
3975  void MakePoolAllocationsLost(
3976  uint32_t currentFrameIndex,
3977  size_t* pLostAllocationCount);
3978 
3979  VmaDefragmentator* EnsureDefragmentator(
3980  VmaAllocator hAllocator,
3981  uint32_t currentFrameIndex);
3982 
3983  VkResult Defragment(
3984  VmaDefragmentationStats* pDefragmentationStats,
3985  VkDeviceSize& maxBytesToMove,
3986  uint32_t& maxAllocationsToMove);
3987 
3988  void DestroyDefragmentator();
3989 
3990 private:
3991  friend class VmaDefragmentator;
3992 
3993  const VmaAllocator m_hAllocator;
3994  const uint32_t m_MemoryTypeIndex;
3995  const VkDeviceSize m_PreferredBlockSize;
3996  const size_t m_MinBlockCount;
3997  const size_t m_MaxBlockCount;
3998  const VkDeviceSize m_BufferImageGranularity;
3999  const uint32_t m_FrameInUseCount;
4000  const bool m_IsCustomPool;
4001  VMA_MUTEX m_Mutex;
4002  // Incrementally sorted by sumFreeSize, ascending.
4003  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
4004  /* There can be at most one memory block that is completely empty - a
4005  hysteresis to avoid the pessimistic case of alternating creation and destruction
4006  of a VkDeviceMemory. */
4007  bool m_HasEmptyBlock;
4008  VmaDefragmentator* m_pDefragmentator;
4009 
4010  size_t CalcMaxBlockSize() const;
4011 
4012  // Finds and removes given block from vector.
4013  void Remove(VmaDeviceMemoryBlock* pBlock);
4014 
4015  // Performs single step in sorting m_Blocks. They may not be fully sorted
4016  // after this call.
4017  void IncrementallySortBlocks();
4018 
4019  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4020 };
4021 
4022 struct VmaPool_T
4023 {
4024 public:
4025  VmaBlockVector m_BlockVector;
4026 
4027  // Takes ownership.
4028  VmaPool_T(
4029  VmaAllocator hAllocator,
4030  const VmaPoolCreateInfo& createInfo);
4031  ~VmaPool_T();
4032 
4033  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4034 
4035 #if VMA_STATS_STRING_ENABLED
4036  //void PrintDetailedMap(class VmaStringBuilder& sb);
4037 #endif
4038 };
4039 
4040 class VmaDefragmentator
4041 {
4042  const VmaAllocator m_hAllocator;
4043  VmaBlockVector* const m_pBlockVector;
4044  uint32_t m_CurrentFrameIndex;
4045  VkDeviceSize m_BytesMoved;
4046  uint32_t m_AllocationsMoved;
4047 
4048  struct AllocationInfo
4049  {
4050  VmaAllocation m_hAllocation;
4051  VkBool32* m_pChanged;
4052 
4053  AllocationInfo() :
4054  m_hAllocation(VK_NULL_HANDLE),
4055  m_pChanged(VMA_NULL)
4056  {
4057  }
4058  };
4059 
4060  struct AllocationInfoSizeGreater
4061  {
4062  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4063  {
4064  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4065  }
4066  };
4067 
4068  // Used between AddAllocation and Defragment.
4069  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4070 
4071  struct BlockInfo
4072  {
4073  VmaDeviceMemoryBlock* m_pBlock;
4074  bool m_HasNonMovableAllocations;
4075  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4076 
4077  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4078  m_pBlock(VMA_NULL),
4079  m_HasNonMovableAllocations(true),
4080  m_Allocations(pAllocationCallbacks),
4081  m_pMappedDataForDefragmentation(VMA_NULL)
4082  {
4083  }
4084 
4085  void CalcHasNonMovableAllocations()
4086  {
4087  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4088  const size_t defragmentAllocCount = m_Allocations.size();
4089  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4090  }
4091 
4092  void SortAllocationsBySizeDescecnding()
4093  {
4094  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4095  }
4096 
4097  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4098  void Unmap(VmaAllocator hAllocator);
4099 
4100  private:
4101  // Not null if mapped for defragmentation only, not originally mapped.
4102  void* m_pMappedDataForDefragmentation;
4103  };
4104 
4105  struct BlockPointerLess
4106  {
4107  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4108  {
4109  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4110  }
4111  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4112  {
4113  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4114  }
4115  };
4116 
4117  // 1. Blocks with some non-movable allocations go first.
4118  // 2. Blocks with smaller sumFreeSize go first.
4119  struct BlockInfoCompareMoveDestination
4120  {
4121  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4122  {
4123  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4124  {
4125  return true;
4126  }
4127  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4128  {
4129  return false;
4130  }
4131  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4132  {
4133  return true;
4134  }
4135  return false;
4136  }
4137  };
4138 
4139  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4140  BlockInfoVector m_Blocks;
4141 
4142  VkResult DefragmentRound(
4143  VkDeviceSize maxBytesToMove,
4144  uint32_t maxAllocationsToMove);
4145 
4146  static bool MoveMakesSense(
4147  size_t dstBlockIndex, VkDeviceSize dstOffset,
4148  size_t srcBlockIndex, VkDeviceSize srcOffset);
4149 
4150 public:
4151  VmaDefragmentator(
4152  VmaAllocator hAllocator,
4153  VmaBlockVector* pBlockVector,
4154  uint32_t currentFrameIndex);
4155 
4156  ~VmaDefragmentator();
4157 
4158  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4159  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4160 
4161  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4162 
4163  VkResult Defragment(
4164  VkDeviceSize maxBytesToMove,
4165  uint32_t maxAllocationsToMove);
4166 };
4167 
4168 // Main allocator object.
4169 struct VmaAllocator_T
4170 {
4171  bool m_UseMutex;
4172  bool m_UseKhrDedicatedAllocation;
4173  VkDevice m_hDevice;
4174  bool m_AllocationCallbacksSpecified;
4175  VkAllocationCallbacks m_AllocationCallbacks;
4176  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4177 
4178  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4179  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4180  VMA_MUTEX m_HeapSizeLimitMutex;
4181 
4182  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4183  VkPhysicalDeviceMemoryProperties m_MemProps;
4184 
4185  // Default pools.
4186  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4187 
4188  // Each vector is sorted by memory (handle value).
4189  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4190  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4191  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4192 
4193  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4194  ~VmaAllocator_T();
4195 
4196  const VkAllocationCallbacks* GetAllocationCallbacks() const
4197  {
4198  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4199  }
4200  const VmaVulkanFunctions& GetVulkanFunctions() const
4201  {
4202  return m_VulkanFunctions;
4203  }
4204 
4205  VkDeviceSize GetBufferImageGranularity() const
4206  {
4207  return VMA_MAX(
4208  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4209  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4210  }
4211 
4212  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4213  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4214 
4215  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4216  {
4217  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4218  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4219  }
4220 
4221  void GetBufferMemoryRequirements(
4222  VkBuffer hBuffer,
4223  VkMemoryRequirements& memReq,
4224  bool& requiresDedicatedAllocation,
4225  bool& prefersDedicatedAllocation) const;
4226  void GetImageMemoryRequirements(
4227  VkImage hImage,
4228  VkMemoryRequirements& memReq,
4229  bool& requiresDedicatedAllocation,
4230  bool& prefersDedicatedAllocation) const;
4231 
4232  // Main allocation function.
4233  VkResult AllocateMemory(
4234  const VkMemoryRequirements& vkMemReq,
4235  bool requiresDedicatedAllocation,
4236  bool prefersDedicatedAllocation,
4237  VkBuffer dedicatedBuffer,
4238  VkImage dedicatedImage,
4239  const VmaAllocationCreateInfo& createInfo,
4240  VmaSuballocationType suballocType,
4241  VmaAllocation* pAllocation);
4242 
4243  // Main deallocation function.
4244  void FreeMemory(const VmaAllocation allocation);
4245 
4246  void CalculateStats(VmaStats* pStats);
4247 
4248 #if VMA_STATS_STRING_ENABLED
4249  void PrintDetailedMap(class VmaJsonWriter& json);
4250 #endif
4251 
4252  VkResult Defragment(
4253  VmaAllocation* pAllocations,
4254  size_t allocationCount,
4255  VkBool32* pAllocationsChanged,
4256  const VmaDefragmentationInfo* pDefragmentationInfo,
4257  VmaDefragmentationStats* pDefragmentationStats);
4258 
4259  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4260  bool TouchAllocation(VmaAllocation hAllocation);
4261 
4262  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4263  void DestroyPool(VmaPool pool);
4264  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4265 
4266  void SetCurrentFrameIndex(uint32_t frameIndex);
4267 
4268  void MakePoolAllocationsLost(
4269  VmaPool hPool,
4270  size_t* pLostAllocationCount);
4271 
4272  void CreateLostAllocation(VmaAllocation* pAllocation);
4273 
4274  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4275  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4276 
4277  VkResult Map(VmaAllocation hAllocation, void** ppData);
4278  void Unmap(VmaAllocation hAllocation);
4279 
4280 private:
4281  VkDeviceSize m_PreferredLargeHeapBlockSize;
4282 
4283  VkPhysicalDevice m_PhysicalDevice;
4284  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4285 
4286  VMA_MUTEX m_PoolsMutex;
4287  // Protected by m_PoolsMutex. Sorted by pointer value.
4288  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4289 
4290  VmaVulkanFunctions m_VulkanFunctions;
4291 
4292  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4293 
4294  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4295 
4296  VkResult AllocateMemoryOfType(
4297  const VkMemoryRequirements& vkMemReq,
4298  bool dedicatedAllocation,
4299  VkBuffer dedicatedBuffer,
4300  VkImage dedicatedImage,
4301  const VmaAllocationCreateInfo& createInfo,
4302  uint32_t memTypeIndex,
4303  VmaSuballocationType suballocType,
4304  VmaAllocation* pAllocation);
4305 
4306  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
4307  VkResult AllocateDedicatedMemory(
4308  VkDeviceSize size,
4309  VmaSuballocationType suballocType,
4310  uint32_t memTypeIndex,
4311  bool map,
4312  bool isUserDataString,
4313  void* pUserData,
4314  VkBuffer dedicatedBuffer,
4315  VkImage dedicatedImage,
4316  VmaAllocation* pAllocation);
4317 
4318  // Frees the given allocation created as dedicated memory: unregisters it and releases its VkDeviceMemory.
4319  void FreeDedicatedMemory(VmaAllocation allocation);
4320 };
4321 
4323 // Memory allocation #2 after VmaAllocator_T definition
4324 
4325 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4326 {
4327  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4328 }
4329 
4330 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4331 {
4332  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4333 }
4334 
4335 template<typename T>
4336 static T* VmaAllocate(VmaAllocator hAllocator)
4337 {
4338  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4339 }
4340 
4341 template<typename T>
4342 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4343 {
4344  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4345 }
4346 
4347 template<typename T>
4348 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4349 {
4350  if(ptr != VMA_NULL)
4351  {
4352  ptr->~T();
4353  VmaFree(hAllocator, ptr);
4354  }
4355 }
4356 
4357 template<typename T>
4358 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4359 {
4360  if(ptr != VMA_NULL)
4361  {
4362  for(size_t i = count; i--; )
4363  ptr[i].~T();
4364  VmaFree(hAllocator, ptr);
4365  }
4366 }
4367 
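// A minimal usage sketch, not part of the library, pairing the typed helpers
// above with placement-new so that both construction and destruction go
// through the VkAllocationCallbacks. MyObject is a made-up example type.
#if 0
#include <new>
struct MyObject { int value = 0; };
static void TypedAllocSketch(VmaAllocator hAllocator)
{
    MyObject* const p = new(VmaAllocate<MyObject>(hAllocator)) MyObject();
    p->value = 42;
    vma_delete(hAllocator, p); // Runs ~MyObject(), then VmaFree().
}
#endif
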
4369 // VmaStringBuilder
4370 
4371 #if VMA_STATS_STRING_ENABLED
4372 
4373 class VmaStringBuilder
4374 {
4375 public:
4376  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4377  size_t GetLength() const { return m_Data.size(); }
4378  const char* GetData() const { return m_Data.data(); }
4379 
4380  void Add(char ch) { m_Data.push_back(ch); }
4381  void Add(const char* pStr);
4382  void AddNewLine() { Add('\n'); }
4383  void AddNumber(uint32_t num);
4384  void AddNumber(uint64_t num);
4385  void AddPointer(const void* ptr);
4386 
4387 private:
4388  VmaVector< char, VmaStlAllocator<char> > m_Data;
4389 };
4390 
4391 void VmaStringBuilder::Add(const char* pStr)
4392 {
4393  const size_t strLen = strlen(pStr);
4394  if(strLen > 0)
4395  {
4396  const size_t oldCount = m_Data.size();
4397  m_Data.resize(oldCount + strLen);
4398  memcpy(m_Data.data() + oldCount, pStr, strLen);
4399  }
4400 }
4401 
4402 void VmaStringBuilder::AddNumber(uint32_t num)
4403 {
4404  char buf[11];
4405  VmaUint32ToStr(buf, sizeof(buf), num);
4406  Add(buf);
4407 }
4408 
4409 void VmaStringBuilder::AddNumber(uint64_t num)
4410 {
4411  char buf[21];
4412  VmaUint64ToStr(buf, sizeof(buf), num);
4413  Add(buf);
4414 }
4415 
4416 void VmaStringBuilder::AddPointer(const void* ptr)
4417 {
4418  char buf[21];
4419  VmaPtrToStr(buf, sizeof(buf), ptr);
4420  Add(buf);
4421 }
4422 
4423 #endif // #if VMA_STATS_STRING_ENABLED
4424 
4426 // VmaJsonWriter
4427 
4428 #if VMA_STATS_STRING_ENABLED
4429 
4430 class VmaJsonWriter
4431 {
4432 public:
4433  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4434  ~VmaJsonWriter();
4435 
4436  void BeginObject(bool singleLine = false);
4437  void EndObject();
4438 
4439  void BeginArray(bool singleLine = false);
4440  void EndArray();
4441 
4442  void WriteString(const char* pStr);
4443  void BeginString(const char* pStr = VMA_NULL);
4444  void ContinueString(const char* pStr);
4445  void ContinueString(uint32_t n);
4446  void ContinueString(uint64_t n);
4447  void ContinueString_Pointer(const void* ptr);
4448  void EndString(const char* pStr = VMA_NULL);
4449 
4450  void WriteNumber(uint32_t n);
4451  void WriteNumber(uint64_t n);
4452  void WriteBool(bool b);
4453  void WriteNull();
4454 
4455 private:
4456  static const char* const INDENT;
4457 
4458  enum COLLECTION_TYPE
4459  {
4460  COLLECTION_TYPE_OBJECT,
4461  COLLECTION_TYPE_ARRAY,
4462  };
4463  struct StackItem
4464  {
4465  COLLECTION_TYPE type;
4466  uint32_t valueCount;
4467  bool singleLineMode;
4468  };
4469 
4470  VmaStringBuilder& m_SB;
4471  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4472  bool m_InsideString;
4473 
4474  void BeginValue(bool isString);
4475  void WriteIndent(bool oneLess = false);
4476 };
4477 
4478 const char* const VmaJsonWriter::INDENT = " ";
4479 
4480 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4481  m_SB(sb),
4482  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4483  m_InsideString(false)
4484 {
4485 }
4486 
4487 VmaJsonWriter::~VmaJsonWriter()
4488 {
4489  VMA_ASSERT(!m_InsideString);
4490  VMA_ASSERT(m_Stack.empty());
4491 }
4492 
4493 void VmaJsonWriter::BeginObject(bool singleLine)
4494 {
4495  VMA_ASSERT(!m_InsideString);
4496 
4497  BeginValue(false);
4498  m_SB.Add('{');
4499 
4500  StackItem item;
4501  item.type = COLLECTION_TYPE_OBJECT;
4502  item.valueCount = 0;
4503  item.singleLineMode = singleLine;
4504  m_Stack.push_back(item);
4505 }
4506 
4507 void VmaJsonWriter::EndObject()
4508 {
4509  VMA_ASSERT(!m_InsideString);
4510 
4511  WriteIndent(true);
4512  m_SB.Add('}');
4513 
4514  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4515  m_Stack.pop_back();
4516 }
4517 
4518 void VmaJsonWriter::BeginArray(bool singleLine)
4519 {
4520  VMA_ASSERT(!m_InsideString);
4521 
4522  BeginValue(false);
4523  m_SB.Add('[');
4524 
4525  StackItem item;
4526  item.type = COLLECTION_TYPE_ARRAY;
4527  item.valueCount = 0;
4528  item.singleLineMode = singleLine;
4529  m_Stack.push_back(item);
4530 }
4531 
4532 void VmaJsonWriter::EndArray()
4533 {
4534  VMA_ASSERT(!m_InsideString);
4535 
4536  WriteIndent(true);
4537  m_SB.Add(']');
4538 
4539  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4540  m_Stack.pop_back();
4541 }
4542 
4543 void VmaJsonWriter::WriteString(const char* pStr)
4544 {
4545  BeginString(pStr);
4546  EndString();
4547 }
4548 
4549 void VmaJsonWriter::BeginString(const char* pStr)
4550 {
4551  VMA_ASSERT(!m_InsideString);
4552 
4553  BeginValue(true);
4554  m_SB.Add('"');
4555  m_InsideString = true;
4556  if(pStr != VMA_NULL && pStr[0] != '\0')
4557  {
4558  ContinueString(pStr);
4559  }
4560 }
4561 
4562 void VmaJsonWriter::ContinueString(const char* pStr)
4563 {
4564  VMA_ASSERT(m_InsideString);
4565 
4566  const size_t strLen = strlen(pStr);
4567  for(size_t i = 0; i < strLen; ++i)
4568  {
4569  char ch = pStr[i];
4570  if(ch == '\\')
4571  {
4572  m_SB.Add("\\\\");
4573  }
4574  else if(ch == '"')
4575  {
4576  m_SB.Add("\\\"");
4577  }
4578  else if(ch >= 32)
4579  {
4580  m_SB.Add(ch);
4581  }
4582  else switch(ch)
4583  {
4584  case '\b':
4585  m_SB.Add("\\b");
4586  break;
4587  case '\f':
4588  m_SB.Add("\\f");
4589  break;
4590  case '\n':
4591  m_SB.Add("\\n");
4592  break;
4593  case '\r':
4594  m_SB.Add("\\r");
4595  break;
4596  case '\t':
4597  m_SB.Add("\\t");
4598  break;
4599  default:
4600  VMA_ASSERT(0 && "Character not currently supported.");
4601  break;
4602  }
4603  }
4604 }
4605 
4606 void VmaJsonWriter::ContinueString(uint32_t n)
4607 {
4608  VMA_ASSERT(m_InsideString);
4609  m_SB.AddNumber(n);
4610 }
4611 
4612 void VmaJsonWriter::ContinueString(uint64_t n)
4613 {
4614  VMA_ASSERT(m_InsideString);
4615  m_SB.AddNumber(n);
4616 }
4617 
4618 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4619 {
4620  VMA_ASSERT(m_InsideString);
4621  m_SB.AddPointer(ptr);
4622 }
4623 
4624 void VmaJsonWriter::EndString(const char* pStr)
4625 {
4626  VMA_ASSERT(m_InsideString);
4627  if(pStr != VMA_NULL && pStr[0] != '\0')
4628  {
4629  ContinueString(pStr);
4630  }
4631  m_SB.Add('"');
4632  m_InsideString = false;
4633 }
4634 
4635 void VmaJsonWriter::WriteNumber(uint32_t n)
4636 {
4637  VMA_ASSERT(!m_InsideString);
4638  BeginValue(false);
4639  m_SB.AddNumber(n);
4640 }
4641 
4642 void VmaJsonWriter::WriteNumber(uint64_t n)
4643 {
4644  VMA_ASSERT(!m_InsideString);
4645  BeginValue(false);
4646  m_SB.AddNumber(n);
4647 }
4648 
4649 void VmaJsonWriter::WriteBool(bool b)
4650 {
4651  VMA_ASSERT(!m_InsideString);
4652  BeginValue(false);
4653  m_SB.Add(b ? "true" : "false");
4654 }
4655 
4656 void VmaJsonWriter::WriteNull()
4657 {
4658  VMA_ASSERT(!m_InsideString);
4659  BeginValue(false);
4660  m_SB.Add("null");
4661 }
4662 
4663 void VmaJsonWriter::BeginValue(bool isString)
4664 {
4665  if(!m_Stack.empty())
4666  {
4667  StackItem& currItem = m_Stack.back();
4668  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4669  currItem.valueCount % 2 == 0)
4670  {
4671  VMA_ASSERT(isString);
4672  }
4673 
4674  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4675  currItem.valueCount % 2 != 0)
4676  {
4677  m_SB.Add(": ");
4678  }
4679  else if(currItem.valueCount > 0)
4680  {
4681  m_SB.Add(", ");
4682  WriteIndent();
4683  }
4684  else
4685  {
4686  WriteIndent();
4687  }
4688  ++currItem.valueCount;
4689  }
4690 }
4691 
4692 void VmaJsonWriter::WriteIndent(bool oneLess)
4693 {
4694  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4695  {
4696  m_SB.AddNewLine();
4697 
4698  size_t count = m_Stack.size();
4699  if(count > 0 && oneLess)
4700  {
4701  --count;
4702  }
4703  for(size_t i = 0; i < count; ++i)
4704  {
4705  m_SB.Add(INDENT);
4706  }
4707  }
4708 }
4709 
4710 #endif // #if VMA_STATS_STRING_ENABLED
4711 
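// A minimal usage sketch, not part of the library, of VmaJsonWriter above
// (meaningful only when VMA_STATS_STRING_ENABLED is nonzero). The keys and
// values are made up; note that inside an object every value must be
// preceded by a string key, which BeginValue() asserts.
#if 0
static void JsonWriterSketch(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("TotalBytes");    // Key...
        json.WriteNumber((uint64_t)65536); // ...then its value.
        json.WriteString("Empty");
        json.WriteBool(false);
        json.EndObject(); // Destructor asserts the stack is empty again.
    }
    // sb.GetData() now holds a pretty-printed object like:
    // { "TotalBytes": 65536, "Empty": false }
}
#endif
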
4713 
4714 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4715 {
4716  if(IsUserDataString())
4717  {
4718  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4719 
4720  FreeUserDataString(hAllocator);
4721 
4722  if(pUserData != VMA_NULL)
4723  {
4724  const char* const newStrSrc = (char*)pUserData;
4725  const size_t newStrLen = strlen(newStrSrc);
4726  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4727  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4728  m_pUserData = newStrDst;
4729  }
4730  }
4731  else
4732  {
4733  m_pUserData = pUserData;
4734  }
4735 }
4736 
4737 void VmaAllocation_T::ChangeBlockAllocation(
4738  VmaAllocator hAllocator,
4739  VmaDeviceMemoryBlock* block,
4740  VkDeviceSize offset)
4741 {
4742  VMA_ASSERT(block != VMA_NULL);
4743  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4744 
4745  // Move mapping reference counter from old block to new block.
4746  if(block != m_BlockAllocation.m_Block)
4747  {
4748  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4749  if(IsPersistentMap())
4750  ++mapRefCount;
4751  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4752  block->Map(hAllocator, mapRefCount, VMA_NULL);
4753  }
4754 
4755  m_BlockAllocation.m_Block = block;
4756  m_BlockAllocation.m_Offset = offset;
4757 }
4758 
4759 VkDeviceSize VmaAllocation_T::GetOffset() const
4760 {
4761  switch(m_Type)
4762  {
4763  case ALLOCATION_TYPE_BLOCK:
4764  return m_BlockAllocation.m_Offset;
4765  case ALLOCATION_TYPE_DEDICATED:
4766  return 0;
4767  default:
4768  VMA_ASSERT(0);
4769  return 0;
4770  }
4771 }
4772 
4773 VkDeviceMemory VmaAllocation_T::GetMemory() const
4774 {
4775  switch(m_Type)
4776  {
4777  case ALLOCATION_TYPE_BLOCK:
4778  return m_BlockAllocation.m_Block->m_hMemory;
4779  case ALLOCATION_TYPE_DEDICATED:
4780  return m_DedicatedAllocation.m_hMemory;
4781  default:
4782  VMA_ASSERT(0);
4783  return VK_NULL_HANDLE;
4784  }
4785 }
4786 
4787 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4788 {
4789  switch(m_Type)
4790  {
4791  case ALLOCATION_TYPE_BLOCK:
4792  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4793  case ALLOCATION_TYPE_DEDICATED:
4794  return m_DedicatedAllocation.m_MemoryTypeIndex;
4795  default:
4796  VMA_ASSERT(0);
4797  return UINT32_MAX;
4798  }
4799 }
4800 
4801 void* VmaAllocation_T::GetMappedData() const
4802 {
4803  switch(m_Type)
4804  {
4805  case ALLOCATION_TYPE_BLOCK:
4806  if(m_MapCount != 0)
4807  {
4808  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4809  VMA_ASSERT(pBlockData != VMA_NULL);
4810  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4811  }
4812  else
4813  {
4814  return VMA_NULL;
4815  }
4816  break;
4817  case ALLOCATION_TYPE_DEDICATED:
4818  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4819  return m_DedicatedAllocation.m_pMappedData;
4820  default:
4821  VMA_ASSERT(0);
4822  return VMA_NULL;
4823  }
4824 }
4825 
4826 bool VmaAllocation_T::CanBecomeLost() const
4827 {
4828  switch(m_Type)
4829  {
4830  case ALLOCATION_TYPE_BLOCK:
4831  return m_BlockAllocation.m_CanBecomeLost;
4832  case ALLOCATION_TYPE_DEDICATED:
4833  return false;
4834  default:
4835  VMA_ASSERT(0);
4836  return false;
4837  }
4838 }
4839 
4840 VmaPool VmaAllocation_T::GetPool() const
4841 {
4842  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4843  return m_BlockAllocation.m_hPool;
4844 }
4845 
4846 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4847 {
4848  VMA_ASSERT(CanBecomeLost());
4849 
4850  /*
4851  Warning: This is a carefully designed algorithm.
4852  Do not modify unless you really know what you're doing :)
4853  */
4854  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4855  for(;;)
4856  {
4857  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4858  {
4859  VMA_ASSERT(0);
4860  return false;
4861  }
4862  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4863  {
4864  return false;
4865  }
4866  else // Last use time earlier than current time.
4867  {
4868  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4869  {
4870  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4871  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4872  return true;
4873  }
4874  }
4875  }
4876 }
4877 
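// Worked example of the frame arithmetic above, with illustrative numbers:
// for frameInUseCount = 2 and LastUseFrameIndex = 10, the allocation
// survives while currentFrameIndex <= 12 (because 10 + 2 >= 12) and can
// first become lost at currentFrameIndex = 13 (10 + 2 < 13). If another
// thread touches the allocation between the load and the compare-exchange,
// the CAS fails, the loop re-reads the fresh frame index, and the touch wins.
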
4878 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4879 {
4880  VMA_ASSERT(IsUserDataString());
4881  if(m_pUserData != VMA_NULL)
4882  {
4883  char* const oldStr = (char*)m_pUserData;
4884  const size_t oldStrLen = strlen(oldStr);
4885  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4886  m_pUserData = VMA_NULL;
4887  }
4888 }
4889 
4890 void VmaAllocation_T::BlockAllocMap()
4891 {
4892  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4893 
4894  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4895  {
4896  ++m_MapCount;
4897  }
4898  else
4899  {
4900  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4901  }
4902 }
4903 
4904 void VmaAllocation_T::BlockAllocUnmap()
4905 {
4906  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4907 
4908  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4909  {
4910  --m_MapCount;
4911  }
4912  else
4913  {
4914  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4915  }
4916 }
4917 
4918 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4919 {
4920  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4921 
4922  if(m_MapCount != 0)
4923  {
4924  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4925  {
4926  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4927  *ppData = m_DedicatedAllocation.m_pMappedData;
4928  ++m_MapCount;
4929  return VK_SUCCESS;
4930  }
4931  else
4932  {
4933  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4934  return VK_ERROR_MEMORY_MAP_FAILED;
4935  }
4936  }
4937  else
4938  {
4939  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4940  hAllocator->m_hDevice,
4941  m_DedicatedAllocation.m_hMemory,
4942  0, // offset
4943  VK_WHOLE_SIZE,
4944  0, // flags
4945  ppData);
4946  if(result == VK_SUCCESS)
4947  {
4948  m_DedicatedAllocation.m_pMappedData = *ppData;
4949  m_MapCount = 1;
4950  }
4951  return result;
4952  }
4953 }
4954 
4955 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4956 {
4957  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4958 
4959  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4960  {
4961  --m_MapCount;
4962  if(m_MapCount == 0)
4963  {
4964  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4965  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4966  hAllocator->m_hDevice,
4967  m_DedicatedAllocation.m_hMemory);
4968  }
4969  }
4970  else
4971  {
4972  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4973  }
4974 }
4975 
4976 #if VMA_STATS_STRING_ENABLED
4977 
4978 // Names correspond to values of enum VmaSuballocationType.
4979 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4980  "FREE",
4981  "UNKNOWN",
4982  "BUFFER",
4983  "IMAGE_UNKNOWN",
4984  "IMAGE_LINEAR",
4985  "IMAGE_OPTIMAL",
4986 };
4987 
4988 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4989 {
4990  json.BeginObject();
4991 
4992  json.WriteString("Blocks");
4993  json.WriteNumber(stat.blockCount);
4994 
4995  json.WriteString("Allocations");
4996  json.WriteNumber(stat.allocationCount);
4997 
4998  json.WriteString("UnusedRanges");
4999  json.WriteNumber(stat.unusedRangeCount);
5000 
5001  json.WriteString("UsedBytes");
5002  json.WriteNumber(stat.usedBytes);
5003 
5004  json.WriteString("UnusedBytes");
5005  json.WriteNumber(stat.unusedBytes);
5006 
5007  if(stat.allocationCount > 1)
5008  {
5009  json.WriteString("AllocationSize");
5010  json.BeginObject(true);
5011  json.WriteString("Min");
5012  json.WriteNumber(stat.allocationSizeMin);
5013  json.WriteString("Avg");
5014  json.WriteNumber(stat.allocationSizeAvg);
5015  json.WriteString("Max");
5016  json.WriteNumber(stat.allocationSizeMax);
5017  json.EndObject();
5018  }
5019 
5020  if(stat.unusedRangeCount > 1)
5021  {
5022  json.WriteString("UnusedRangeSize");
5023  json.BeginObject(true);
5024  json.WriteString("Min");
5025  json.WriteNumber(stat.unusedRangeSizeMin);
5026  json.WriteString("Avg");
5027  json.WriteNumber(stat.unusedRangeSizeAvg);
5028  json.WriteString("Max");
5029  json.WriteNumber(stat.unusedRangeSizeMax);
5030  json.EndObject();
5031  }
5032 
5033  json.EndObject();
5034 }
5035 
5036 #endif // #if VMA_STATS_STRING_ENABLED
5037 
5038 struct VmaSuballocationItemSizeLess
5039 {
5040  bool operator()(
5041  const VmaSuballocationList::iterator lhs,
5042  const VmaSuballocationList::iterator rhs) const
5043  {
5044  return lhs->size < rhs->size;
5045  }
5046  bool operator()(
5047  const VmaSuballocationList::iterator lhs,
5048  VkDeviceSize rhsSize) const
5049  {
5050  return lhs->size < rhsSize;
5051  }
5052 };
5053 
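// The heterogeneous second operator() above is what enables a lower_bound
// style binary search over m_FreeSuballocationsBySize with a plain
// VkDeviceSize key. A sketch of the equivalent standard-library call, shown
// for illustration only (the implementation below uses the library's own
// VmaBinaryFindFirstNotLess):
#if 0
#include <algorithm>
static VmaSuballocationList::iterator* FindFirstFreeNotLess(
    VmaSuballocationList::iterator* beg,
    VmaSuballocationList::iterator* end,
    VkDeviceSize allocSize)
{
    // First registered free suballocation with size >= allocSize; valid
    // because the array is kept sorted by size, ascending.
    return std::lower_bound(beg, end, allocSize, VmaSuballocationItemSizeLess());
}
#endif
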
5055 // class VmaBlockMetadata
5056 
5057 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5058  m_Size(0),
5059  m_FreeCount(0),
5060  m_SumFreeSize(0),
5061  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5062  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5063 {
5064 }
5065 
5066 VmaBlockMetadata::~VmaBlockMetadata()
5067 {
5068 }
5069 
5070 void VmaBlockMetadata::Init(VkDeviceSize size)
5071 {
5072  m_Size = size;
5073  m_FreeCount = 1;
5074  m_SumFreeSize = size;
5075 
5076  VmaSuballocation suballoc = {};
5077  suballoc.offset = 0;
5078  suballoc.size = size;
5079  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5080  suballoc.hAllocation = VK_NULL_HANDLE;
5081 
5082  m_Suballocations.push_back(suballoc);
5083  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5084  --suballocItem;
5085  m_FreeSuballocationsBySize.push_back(suballocItem);
5086 }
5087 
5088 bool VmaBlockMetadata::Validate() const
5089 {
5090  if(m_Suballocations.empty())
5091  {
5092  return false;
5093  }
5094 
5095  // Expected offset of new suballocation as calculated from previous ones.
5096  VkDeviceSize calculatedOffset = 0;
5097  // Expected number of free suballocations as calculated from traversing their list.
5098  uint32_t calculatedFreeCount = 0;
5099  // Expected sum size of free suballocations as calculated from traversing their list.
5100  VkDeviceSize calculatedSumFreeSize = 0;
5101  // Expected number of free suballocations that should be registered in
5102  // m_FreeSuballocationsBySize calculated from traversing their list.
5103  size_t freeSuballocationsToRegister = 0;
5104  // True if the previously visited suballocation was free.
5105  bool prevFree = false;
5106 
5107  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5108  suballocItem != m_Suballocations.cend();
5109  ++suballocItem)
5110  {
5111  const VmaSuballocation& subAlloc = *suballocItem;
5112 
5113  // Actual offset of this suballocation doesn't match expected one.
5114  if(subAlloc.offset != calculatedOffset)
5115  {
5116  return false;
5117  }
5118 
5119  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5120  // Two adjacent free suballocations are invalid. They should be merged.
5121  if(prevFree && currFree)
5122  {
5123  return false;
5124  }
5125 
5126  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5127  {
5128  return false;
5129  }
5130 
5131  if(currFree)
5132  {
5133  calculatedSumFreeSize += subAlloc.size;
5134  ++calculatedFreeCount;
5135  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5136  {
5137  ++freeSuballocationsToRegister;
5138  }
5139  }
5140  else
5141  {
5142  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5143  {
5144  return false;
5145  }
5146  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5147  {
5148  return false;
5149  }
5150  }
5151 
5152  calculatedOffset += subAlloc.size;
5153  prevFree = currFree;
5154  }
5155 
5156  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5157  // match the expected count.
5158  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5159  {
5160  return false;
5161  }
5162 
5163  VkDeviceSize lastSize = 0;
5164  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5165  {
5166  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5167 
5168  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5169  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5170  {
5171  return false;
5172  }
5173  // They must be sorted by size ascending.
5174  if(suballocItem->size < lastSize)
5175  {
5176  return false;
5177  }
5178 
5179  lastSize = suballocItem->size;
5180  }
5181 
5182  // Check if totals match the calculated values.
5183  if(!ValidateFreeSuballocationList() ||
5184  (calculatedOffset != m_Size) ||
5185  (calculatedSumFreeSize != m_SumFreeSize) ||
5186  (calculatedFreeCount != m_FreeCount))
5187  {
5188  return false;
5189  }
5190 
5191  return true;
5192 }
5193 
5194 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5195 {
5196  if(!m_FreeSuballocationsBySize.empty())
5197  {
5198  return m_FreeSuballocationsBySize.back()->size;
5199  }
5200  else
5201  {
5202  return 0;
5203  }
5204 }
5205 
5206 bool VmaBlockMetadata::IsEmpty() const
5207 {
5208  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5209 }
5210 
5211 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5212 {
5213  outInfo.blockCount = 1;
5214 
5215  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5216  outInfo.allocationCount = rangeCount - m_FreeCount;
5217  outInfo.unusedRangeCount = m_FreeCount;
5218 
5219  outInfo.unusedBytes = m_SumFreeSize;
5220  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5221 
5222  outInfo.allocationSizeMin = UINT64_MAX;
5223  outInfo.allocationSizeMax = 0;
5224  outInfo.unusedRangeSizeMin = UINT64_MAX;
5225  outInfo.unusedRangeSizeMax = 0;
5226 
5227  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5228  suballocItem != m_Suballocations.cend();
5229  ++suballocItem)
5230  {
5231  const VmaSuballocation& suballoc = *suballocItem;
5232  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5233  {
5234  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5235  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5236  }
5237  else
5238  {
5239  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5240  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5241  }
5242  }
5243 }
5244 
5245 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5246 {
5247  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5248 
5249  inoutStats.size += m_Size;
5250  inoutStats.unusedSize += m_SumFreeSize;
5251  inoutStats.allocationCount += rangeCount - m_FreeCount;
5252  inoutStats.unusedRangeCount += m_FreeCount;
5253  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5254 }
5255 
5256 #if VMA_STATS_STRING_ENABLED
5257 
5258 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5259 {
5260  json.BeginObject();
5261 
5262  json.WriteString("TotalBytes");
5263  json.WriteNumber(m_Size);
5264 
5265  json.WriteString("UnusedBytes");
5266  json.WriteNumber(m_SumFreeSize);
5267 
5268  json.WriteString("Allocations");
5269  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5270 
5271  json.WriteString("UnusedRanges");
5272  json.WriteNumber(m_FreeCount);
5273 
5274  json.WriteString("Suballocations");
5275  json.BeginArray();
5276  size_t i = 0;
5277  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5278  suballocItem != m_Suballocations.cend();
5279  ++suballocItem, ++i)
5280  {
5281  json.BeginObject(true);
5282 
5283  json.WriteString("Type");
5284  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5285 
5286  json.WriteString("Size");
5287  json.WriteNumber(suballocItem->size);
5288 
5289  json.WriteString("Offset");
5290  json.WriteNumber(suballocItem->offset);
5291 
5292  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5293  {
5294  const void* pUserData = suballocItem->hAllocation->GetUserData();
5295  if(pUserData != VMA_NULL)
5296  {
5297  json.WriteString("UserData");
5298  if(suballocItem->hAllocation->IsUserDataString())
5299  {
5300  json.WriteString((const char*)pUserData);
5301  }
5302  else
5303  {
5304  json.BeginString();
5305  json.ContinueString_Pointer(pUserData);
5306  json.EndString();
5307  }
5308  }
5309  }
5310 
5311  json.EndObject();
5312  }
5313  json.EndArray();
5314 
5315  json.EndObject();
5316 }
5317 
5318 #endif // #if VMA_STATS_STRING_ENABLED
5319 
5320 /*
5321 How many suitable free suballocations to analyze before choosing the best one.
5322 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation will
5323  be chosen.
5324 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5325  suballocations will be analyzed and the best one will be chosen.
5326 - Any other value is also acceptable.
5327 */
5328 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5329 
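// Worked example of the strategies above on a free list sorted by size
// ascending, e.g. sizes {64, 256, 1024, 4096} and allocSize = 200
// (illustrative numbers): the Best-Fit path binary-searches to the first
// entry not less than 200 and tries 256 first, wasting the least space;
// the alternative path scans from 4096 downward, which can succeed sooner
// when alignment or bufferImageGranularity padding rejects the tight fits.
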
5330 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5331 {
5332  VMA_ASSERT(IsEmpty());
5333  pAllocationRequest->offset = 0;
5334  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5335  pAllocationRequest->sumItemSize = 0;
5336  pAllocationRequest->item = m_Suballocations.begin();
5337  pAllocationRequest->itemsToMakeLostCount = 0;
5338 }
5339 
5340 bool VmaBlockMetadata::CreateAllocationRequest(
5341  uint32_t currentFrameIndex,
5342  uint32_t frameInUseCount,
5343  VkDeviceSize bufferImageGranularity,
5344  VkDeviceSize allocSize,
5345  VkDeviceSize allocAlignment,
5346  VmaSuballocationType allocType,
5347  bool canMakeOtherLost,
5348  VmaAllocationRequest* pAllocationRequest)
5349 {
5350  VMA_ASSERT(allocSize > 0);
5351  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5352  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5353  VMA_HEAVY_ASSERT(Validate());
5354 
5355  // There is not enough total free space in this block to fulfill the request: Early return.
5356  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5357  {
5358  return false;
5359  }
5360 
5361  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5362  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5363  if(freeSuballocCount > 0)
5364  {
5365  if(VMA_BEST_FIT)
5366  {
5367  // Find first free suballocation with size not less than allocSize.
5368  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5369  m_FreeSuballocationsBySize.data(),
5370  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5371  allocSize,
5372  VmaSuballocationItemSizeLess());
5373  size_t index = it - m_FreeSuballocationsBySize.data();
5374  for(; index < freeSuballocCount; ++index)
5375  {
5376  if(CheckAllocation(
5377  currentFrameIndex,
5378  frameInUseCount,
5379  bufferImageGranularity,
5380  allocSize,
5381  allocAlignment,
5382  allocType,
5383  m_FreeSuballocationsBySize[index],
5384  false, // canMakeOtherLost
5385  &pAllocationRequest->offset,
5386  &pAllocationRequest->itemsToMakeLostCount,
5387  &pAllocationRequest->sumFreeSize,
5388  &pAllocationRequest->sumItemSize))
5389  {
5390  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5391  return true;
5392  }
5393  }
5394  }
5395  else
5396  {
5397  // Search starting from the biggest suballocations.
5398  for(size_t index = freeSuballocCount; index--; )
5399  {
5400  if(CheckAllocation(
5401  currentFrameIndex,
5402  frameInUseCount,
5403  bufferImageGranularity,
5404  allocSize,
5405  allocAlignment,
5406  allocType,
5407  m_FreeSuballocationsBySize[index],
5408  false, // canMakeOtherLost
5409  &pAllocationRequest->offset,
5410  &pAllocationRequest->itemsToMakeLostCount,
5411  &pAllocationRequest->sumFreeSize,
5412  &pAllocationRequest->sumItemSize))
5413  {
5414  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5415  return true;
5416  }
5417  }
5418  }
5419  }
5420 
5421  if(canMakeOtherLost)
5422  {
5423  // Brute-force algorithm. TODO: Come up with something better.
5424 
5425  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5426  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5427 
5428  VmaAllocationRequest tmpAllocRequest = {};
5429  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5430  suballocIt != m_Suballocations.end();
5431  ++suballocIt)
5432  {
5433  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5434  suballocIt->hAllocation->CanBecomeLost())
5435  {
5436  if(CheckAllocation(
5437  currentFrameIndex,
5438  frameInUseCount,
5439  bufferImageGranularity,
5440  allocSize,
5441  allocAlignment,
5442  allocType,
5443  suballocIt,
5444  canMakeOtherLost,
5445  &tmpAllocRequest.offset,
5446  &tmpAllocRequest.itemsToMakeLostCount,
5447  &tmpAllocRequest.sumFreeSize,
5448  &tmpAllocRequest.sumItemSize))
5449  {
5450  tmpAllocRequest.item = suballocIt;
5451 
5452  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5453  {
5454  *pAllocationRequest = tmpAllocRequest;
5455  }
5456  }
5457  }
5458  }
5459 
5460  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5461  {
5462  return true;
5463  }
5464  }
5465 
5466  return false;
5467 }
5468 
5469 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5470  uint32_t currentFrameIndex,
5471  uint32_t frameInUseCount,
5472  VmaAllocationRequest* pAllocationRequest)
5473 {
5474  while(pAllocationRequest->itemsToMakeLostCount > 0)
5475  {
5476  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5477  {
5478  ++pAllocationRequest->item;
5479  }
5480  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5481  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5482  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5483  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5484  {
5485  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5486  --pAllocationRequest->itemsToMakeLostCount;
5487  }
5488  else
5489  {
5490  return false;
5491  }
5492  }
5493 
5494  VMA_HEAVY_ASSERT(Validate());
5495  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5496  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5497 
5498  return true;
5499 }
5500 
5501 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5502 {
5503  uint32_t lostAllocationCount = 0;
5504  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5505  it != m_Suballocations.end();
5506  ++it)
5507  {
5508  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5509  it->hAllocation->CanBecomeLost() &&
5510  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5511  {
5512  it = FreeSuballocation(it);
5513  ++lostAllocationCount;
5514  }
5515  }
5516  return lostAllocationCount;
5517 }
5518 
5519 void VmaBlockMetadata::Alloc(
5520  const VmaAllocationRequest& request,
5521  VmaSuballocationType type,
5522  VkDeviceSize allocSize,
5523  VmaAllocation hAllocation)
5524 {
5525  VMA_ASSERT(request.item != m_Suballocations.end());
5526  VmaSuballocation& suballoc = *request.item;
5527  // The given suballocation must be free.
5528  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5529  // Given offset is inside this suballocation.
5530  VMA_ASSERT(request.offset >= suballoc.offset);
5531  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5532  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5533  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5534 
5535  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5536  // it to become used.
5537  UnregisterFreeSuballocation(request.item);
5538 
5539  suballoc.offset = request.offset;
5540  suballoc.size = allocSize;
5541  suballoc.type = type;
5542  suballoc.hAllocation = hAllocation;
5543 
5544  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5545  if(paddingEnd)
5546  {
5547  VmaSuballocation paddingSuballoc = {};
5548  paddingSuballoc.offset = request.offset + allocSize;
5549  paddingSuballoc.size = paddingEnd;
5550  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5551  VmaSuballocationList::iterator next = request.item;
5552  ++next;
5553  const VmaSuballocationList::iterator paddingEndItem =
5554  m_Suballocations.insert(next, paddingSuballoc);
5555  RegisterFreeSuballocation(paddingEndItem);
5556  }
5557 
5558  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5559  if(paddingBegin)
5560  {
5561  VmaSuballocation paddingSuballoc = {};
5562  paddingSuballoc.offset = request.offset - paddingBegin;
5563  paddingSuballoc.size = paddingBegin;
5564  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5565  const VmaSuballocationList::iterator paddingBeginItem =
5566  m_Suballocations.insert(request.item, paddingSuballoc);
5567  RegisterFreeSuballocation(paddingBeginItem);
5568  }
5569 
5570  // Update totals.
5571  m_FreeCount = m_FreeCount - 1;
5572  if(paddingBegin > 0)
5573  {
5574  ++m_FreeCount;
5575  }
5576  if(paddingEnd > 0)
5577  {
5578  ++m_FreeCount;
5579  }
5580  m_SumFreeSize -= allocSize;
5581 }
5582 
5583 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5584 {
5585  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5586  suballocItem != m_Suballocations.end();
5587  ++suballocItem)
5588  {
5589  VmaSuballocation& suballoc = *suballocItem;
5590  if(suballoc.hAllocation == allocation)
5591  {
5592  FreeSuballocation(suballocItem);
5593  VMA_HEAVY_ASSERT(Validate());
5594  return;
5595  }
5596  }
5597  VMA_ASSERT(0 && "Not found!");
5598 }
5599 
5600 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5601 {
5602  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5603  suballocItem != m_Suballocations.end();
5604  ++suballocItem)
5605  {
5606  VmaSuballocation& suballoc = *suballocItem;
5607  if(suballoc.offset == offset)
5608  {
5609  FreeSuballocation(suballocItem);
5610  return;
5611  }
5612  }
5613  VMA_ASSERT(0 && "Not found!");
5614 }
5615 
5616 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5617 {
5618  VkDeviceSize lastSize = 0;
5619  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5620  {
5621  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5622 
5623  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5624  {
5625  VMA_ASSERT(0);
5626  return false;
5627  }
5628  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5629  {
5630  VMA_ASSERT(0);
5631  return false;
5632  }
5633  if(it->size < lastSize)
5634  {
5635  VMA_ASSERT(0);
5636  return false;
5637  }
5638 
5639  lastSize = it->size;
5640  }
5641  return true;
5642 }
5643 
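// Tests whether an allocation of allocSize/allocAlignment can be placed starting at
// suballocItem. On success, writes the final offset to *pOffset. When canMakeOtherLost
// is true, it also counts how many existing allocations would have to be made lost
// (*itemsToMakeLostCount); *pSumFreeSize and *pSumItemSize feed the cost comparison
// between candidate positions.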
5644 bool VmaBlockMetadata::CheckAllocation(
5645  uint32_t currentFrameIndex,
5646  uint32_t frameInUseCount,
5647  VkDeviceSize bufferImageGranularity,
5648  VkDeviceSize allocSize,
5649  VkDeviceSize allocAlignment,
5650  VmaSuballocationType allocType,
5651  VmaSuballocationList::const_iterator suballocItem,
5652  bool canMakeOtherLost,
5653  VkDeviceSize* pOffset,
5654  size_t* itemsToMakeLostCount,
5655  VkDeviceSize* pSumFreeSize,
5656  VkDeviceSize* pSumItemSize) const
5657 {
5658  VMA_ASSERT(allocSize > 0);
5659  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5660  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5661  VMA_ASSERT(pOffset != VMA_NULL);
5662 
5663  *itemsToMakeLostCount = 0;
5664  *pSumFreeSize = 0;
5665  *pSumItemSize = 0;
5666 
5667  if(canMakeOtherLost)
5668  {
5669  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5670  {
5671  *pSumFreeSize = suballocItem->size;
5672  }
5673  else
5674  {
5675  if(suballocItem->hAllocation->CanBecomeLost() &&
5676  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5677  {
5678  ++*itemsToMakeLostCount;
5679  *pSumItemSize = suballocItem->size;
5680  }
5681  else
5682  {
5683  return false;
5684  }
5685  }
5686 
5687  // Remaining size is too small for this request: Early return.
5688  if(m_Size - suballocItem->offset < allocSize)
5689  {
5690  return false;
5691  }
5692 
5693  // Start from offset equal to beginning of this suballocation.
5694  *pOffset = suballocItem->offset;
5695 
5696  // Apply VMA_DEBUG_MARGIN at the beginning.
5697  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5698  {
5699  *pOffset += VMA_DEBUG_MARGIN;
5700  }
5701 
5702  // Apply alignment.
5703  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5704  *pOffset = VmaAlignUp(*pOffset, alignment);
5705 
5706  // Check previous suballocations for BufferImageGranularity conflicts.
5707  // Make bigger alignment if necessary.
5708  if(bufferImageGranularity > 1)
5709  {
5710  bool bufferImageGranularityConflict = false;
5711  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5712  while(prevSuballocItem != m_Suballocations.cbegin())
5713  {
5714  --prevSuballocItem;
5715  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5716  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5717  {
5718  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5719  {
5720  bufferImageGranularityConflict = true;
5721  break;
5722  }
5723  }
5724  else
5725  // Already on previous page.
5726  break;
5727  }
5728  if(bufferImageGranularityConflict)
5729  {
5730  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5731  }
5732  }
5733 
5734  // Now that we have final *pOffset, check if we are past suballocItem.
5735  // If yes, return false - this function should be called for another suballocItem as starting point.
5736  if(*pOffset >= suballocItem->offset + suballocItem->size)
5737  {
5738  return false;
5739  }
5740 
5741  // Calculate padding at the beginning based on current offset.
5742  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5743 
5744  // Calculate required margin at the end if this is not last suballocation.
5745  VmaSuballocationList::const_iterator next = suballocItem;
5746  ++next;
5747  const VkDeviceSize requiredEndMargin =
5748  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5749 
5750  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5751  // Another early return check.
5752  if(suballocItem->offset + totalSize > m_Size)
5753  {
5754  return false;
5755  }
5756 
5757  // Advance lastSuballocItem until desired size is reached.
5758  // Update itemsToMakeLostCount.
5759  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5760  if(totalSize > suballocItem->size)
5761  {
5762  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5763  while(remainingSize > 0)
5764  {
5765  ++lastSuballocItem;
5766  if(lastSuballocItem == m_Suballocations.cend())
5767  {
5768  return false;
5769  }
5770  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5771  {
5772  *pSumFreeSize += lastSuballocItem->size;
5773  }
5774  else
5775  {
5776  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5777  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5778  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5779  {
5780  ++*itemsToMakeLostCount;
5781  *pSumItemSize += lastSuballocItem->size;
5782  }
5783  else
5784  {
5785  return false;
5786  }
5787  }
5788  remainingSize = (lastSuballocItem->size < remainingSize) ?
5789  remainingSize - lastSuballocItem->size : 0;
5790  }
5791  }
5792 
5793  // Check next suballocations for BufferImageGranularity conflicts.
5794  // If conflict exists, we must mark more allocations lost or fail.
5795  if(bufferImageGranularity > 1)
5796  {
5797  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5798  ++nextSuballocItem;
5799  while(nextSuballocItem != m_Suballocations.cend())
5800  {
5801  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5802  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5803  {
5804  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5805  {
5806  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5807  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5808  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5809  {
5810  ++*itemsToMakeLostCount;
5811  }
5812  else
5813  {
5814  return false;
5815  }
5816  }
5817  }
5818  else
5819  {
5820  // Already on next page.
5821  break;
5822  }
5823  ++nextSuballocItem;
5824  }
5825  }
5826  }
5827  else
5828  {
5829  const VmaSuballocation& suballoc = *suballocItem;
5830  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5831 
5832  *pSumFreeSize = suballoc.size;
5833 
5834  // Size of this suballocation is too small for this request: Early return.
5835  if(suballoc.size < allocSize)
5836  {
5837  return false;
5838  }
5839 
5840  // Start from offset equal to beginning of this suballocation.
5841  *pOffset = suballoc.offset;
5842 
5843  // Apply VMA_DEBUG_MARGIN at the beginning.
5844  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5845  {
5846  *pOffset += VMA_DEBUG_MARGIN;
5847  }
5848 
5849  // Apply alignment.
5850  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5851  *pOffset = VmaAlignUp(*pOffset, alignment);
5852 
5853  // Check previous suballocations for BufferImageGranularity conflicts.
5854  // Make bigger alignment if necessary.
5855  if(bufferImageGranularity > 1)
5856  {
5857  bool bufferImageGranularityConflict = false;
5858  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5859  while(prevSuballocItem != m_Suballocations.cbegin())
5860  {
5861  --prevSuballocItem;
5862  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5863  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5864  {
5865  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5866  {
5867  bufferImageGranularityConflict = true;
5868  break;
5869  }
5870  }
5871  else
5872  // Already on previous page.
5873  break;
5874  }
5875  if(bufferImageGranularityConflict)
5876  {
5877  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5878  }
5879  }
5880 
5881  // Calculate padding at the beginning based on current offset.
5882  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5883 
5884  // Calculate required margin at the end if this is not last suballocation.
5885  VmaSuballocationList::const_iterator next = suballocItem;
5886  ++next;
5887  const VkDeviceSize requiredEndMargin =
5888  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5889 
5890  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5891  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5892  {
5893  return false;
5894  }
5895 
5896  // Check next suballocations for BufferImageGranularity conflicts.
5897  // If conflict exists, allocation cannot be made here.
5898  if(bufferImageGranularity > 1)
5899  {
5900  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5901  ++nextSuballocItem;
5902  while(nextSuballocItem != m_Suballocations.cend())
5903  {
5904  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5905  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5906  {
5907  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5908  {
5909  return false;
5910  }
5911  }
5912  else
5913  {
5914  // Already on next page.
5915  break;
5916  }
5917  ++nextSuballocItem;
5918  }
5919  }
5920  }
5921 
5922  // All tests passed: Success. pOffset is already filled.
5923  return true;
5924 }
5925 
5926 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5927 {
5928  VMA_ASSERT(item != m_Suballocations.end());
5929  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5930 
5931  VmaSuballocationList::iterator nextItem = item;
5932  ++nextItem;
5933  VMA_ASSERT(nextItem != m_Suballocations.end());
5934  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5935 
5936  item->size += nextItem->size;
5937  --m_FreeCount;
5938  m_Suballocations.erase(nextItem);
5939 }
5940 
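// Marks the given suballocation as free, updates the totals, and merges it with a
// free neighbor on either side so the list never contains two adjacent free items.
// Returns an iterator to the resulting (possibly merged) free suballocation.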
5941 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5942 {
5943  // Change this suballocation to be marked as free.
5944  VmaSuballocation& suballoc = *suballocItem;
5945  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5946  suballoc.hAllocation = VK_NULL_HANDLE;
5947 
5948  // Update totals.
5949  ++m_FreeCount;
5950  m_SumFreeSize += suballoc.size;
5951 
5952  // Merge with previous and/or next suballocation if it's also free.
5953  bool mergeWithNext = false;
5954  bool mergeWithPrev = false;
5955 
5956  VmaSuballocationList::iterator nextItem = suballocItem;
5957  ++nextItem;
5958  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5959  {
5960  mergeWithNext = true;
5961  }
5962 
5963  VmaSuballocationList::iterator prevItem = suballocItem;
5964  if(suballocItem != m_Suballocations.begin())
5965  {
5966  --prevItem;
5967  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5968  {
5969  mergeWithPrev = true;
5970  }
5971  }
5972 
5973  if(mergeWithNext)
5974  {
5975  UnregisterFreeSuballocation(nextItem);
5976  MergeFreeWithNext(suballocItem);
5977  }
5978 
5979  if(mergeWithPrev)
5980  {
5981  UnregisterFreeSuballocation(prevItem);
5982  MergeFreeWithNext(prevItem);
5983  RegisterFreeSuballocation(prevItem);
5984  return prevItem;
5985  }
5986  else
5987  {
5988  RegisterFreeSuballocation(suballocItem);
5989  return suballocItem;
5990  }
5991 }
5992 
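// m_FreeSuballocationsBySize stores iterators to free suballocations sorted by size
// (ascending), but only those of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER,
// so best-fit searches skip tiny fragments. The two functions below maintain that invariant.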
5993 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5994 {
5995  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5996  VMA_ASSERT(item->size > 0);
5997 
5998  // You may want to enable this validation at the beginning or at the end of
5999  // this function, depending on what you want to check.
6000  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6001 
6002  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6003  {
6004  if(m_FreeSuballocationsBySize.empty())
6005  {
6006  m_FreeSuballocationsBySize.push_back(item);
6007  }
6008  else
6009  {
6010  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6011  }
6012  }
6013 
6014  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6015 }
6016 
6017 
6018 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6019 {
6020  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6021  VMA_ASSERT(item->size > 0);
6022 
6023  // You may want to enable this validation at the beginning or at the end of
6024  // this function, depending on what you want to check.
6025  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6026 
6027  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6028  {
6029  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6030  m_FreeSuballocationsBySize.data(),
6031  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6032  item,
6033  VmaSuballocationItemSizeLess());
6034  for(size_t index = it - m_FreeSuballocationsBySize.data();
6035  index < m_FreeSuballocationsBySize.size();
6036  ++index)
6037  {
6038  if(m_FreeSuballocationsBySize[index] == item)
6039  {
6040  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6041  return;
6042  }
6043  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6044  }
6045  VMA_ASSERT(0 && "Not found.");
6046  }
6047 
6048  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6049 }
6050 
6051 ////////////////////////////////////////////////////////////////////////////////
6052 // class VmaDeviceMemoryMapping
6053 
6054 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
6055  m_MapCount(0),
6056  m_pMappedData(VMA_NULL)
6057 {
6058 }
6059 
6060 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
6061 {
6062  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6063 }
6064 
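// Map/Unmap reference-count the mapping of the whole VkDeviceMemory block:
// vkMapMemory is called only on the 0 -> 1 transition and vkUnmapMemory only on the
// 1 -> 0 transition, so repeated mappings of different allocations in the same block
// are cheap and correctly nested.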
6065 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
6066 {
6067  if(count == 0)
6068  {
6069  return VK_SUCCESS;
6070  }
6071 
6072  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6073  if(m_MapCount != 0)
6074  {
6075  m_MapCount += count;
6076  VMA_ASSERT(m_pMappedData != VMA_NULL);
6077  if(ppData != VMA_NULL)
6078  {
6079  *ppData = m_pMappedData;
6080  }
6081  return VK_SUCCESS;
6082  }
6083  else
6084  {
6085  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6086  hAllocator->m_hDevice,
6087  hMemory,
6088  0, // offset
6089  VK_WHOLE_SIZE,
6090  0, // flags
6091  &m_pMappedData);
6092  if(result == VK_SUCCESS)
6093  {
6094  if(ppData != VMA_NULL)
6095  {
6096  *ppData = m_pMappedData;
6097  }
6098  m_MapCount = count;
6099  }
6100  return result;
6101  }
6102 }
6103 
6104 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6105 {
6106  if(count == 0)
6107  {
6108  return;
6109  }
6110 
6111  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6112  if(m_MapCount >= count)
6113  {
6114  m_MapCount -= count;
6115  if(m_MapCount == 0)
6116  {
6117  m_pMappedData = VMA_NULL;
6118  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6119  }
6120  }
6121  else
6122  {
6123  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6124  }
6125 }
6126 
6127 ////////////////////////////////////////////////////////////////////////////////
6128 // class VmaDeviceMemoryBlock
6129 
6130 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6131  m_MemoryTypeIndex(UINT32_MAX),
6132  m_hMemory(VK_NULL_HANDLE),
6133  m_Metadata(hAllocator)
6134 {
6135 }
6136 
6137 void VmaDeviceMemoryBlock::Init(
6138  uint32_t newMemoryTypeIndex,
6139  VkDeviceMemory newMemory,
6140  VkDeviceSize newSize)
6141 {
6142  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6143 
6144  m_MemoryTypeIndex = newMemoryTypeIndex;
6145  m_hMemory = newMemory;
6146 
6147  m_Metadata.Init(newSize);
6148 }
6149 
6150 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6151 {
6152  // This is the most important assert in the entire library.
6153  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6154  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6155 
6156  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6157  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6158  m_hMemory = VK_NULL_HANDLE;
6159 }
6160 
6161 bool VmaDeviceMemoryBlock::Validate() const
6162 {
6163  if((m_hMemory == VK_NULL_HANDLE) ||
6164  (m_Metadata.GetSize() == 0))
6165  {
6166  return false;
6167  }
6168 
6169  return m_Metadata.Validate();
6170 }
6171 
6172 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6173 {
6174  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6175 }
6176 
6177 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6178 {
6179  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6180 }
6181 
6182 static void InitStatInfo(VmaStatInfo& outInfo)
6183 {
6184  memset(&outInfo, 0, sizeof(outInfo));
6185  outInfo.allocationSizeMin = UINT64_MAX;
6186  outInfo.unusedRangeSizeMin = UINT64_MAX;
6187 }
6188 
6189 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6190 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6191 {
6192  inoutInfo.blockCount += srcInfo.blockCount;
6193  inoutInfo.allocationCount += srcInfo.allocationCount;
6194  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6195  inoutInfo.usedBytes += srcInfo.usedBytes;
6196  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6197  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6198  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6199  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6200  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6201 }
6202 
6203 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6204 {
6205  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6206  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6207  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6208  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6209 }
6210 
6211 VmaPool_T::VmaPool_T(
6212  VmaAllocator hAllocator,
6213  const VmaPoolCreateInfo& createInfo) :
6214  m_BlockVector(
6215  hAllocator,
6216  createInfo.memoryTypeIndex,
6217  createInfo.blockSize,
6218  createInfo.minBlockCount,
6219  createInfo.maxBlockCount,
6220  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6221  createInfo.frameInUseCount,
6222  true) // isCustomPool
6223 {
6224 }
6225 
6226 VmaPool_T::~VmaPool_T()
6227 {
6228 }
6229 
6230 #if VMA_STATS_STRING_ENABLED
6231 
6232 #endif // #if VMA_STATS_STRING_ENABLED
6233 
6234 VmaBlockVector::VmaBlockVector(
6235  VmaAllocator hAllocator,
6236  uint32_t memoryTypeIndex,
6237  VkDeviceSize preferredBlockSize,
6238  size_t minBlockCount,
6239  size_t maxBlockCount,
6240  VkDeviceSize bufferImageGranularity,
6241  uint32_t frameInUseCount,
6242  bool isCustomPool) :
6243  m_hAllocator(hAllocator),
6244  m_MemoryTypeIndex(memoryTypeIndex),
6245  m_PreferredBlockSize(preferredBlockSize),
6246  m_MinBlockCount(minBlockCount),
6247  m_MaxBlockCount(maxBlockCount),
6248  m_BufferImageGranularity(bufferImageGranularity),
6249  m_FrameInUseCount(frameInUseCount),
6250  m_IsCustomPool(isCustomPool),
6251  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6252  m_HasEmptyBlock(false),
6253  m_pDefragmentator(VMA_NULL)
6254 {
6255 }
6256 
6257 VmaBlockVector::~VmaBlockVector()
6258 {
6259  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6260 
6261  for(size_t i = m_Blocks.size(); i--; )
6262  {
6263  m_Blocks[i]->Destroy(m_hAllocator);
6264  vma_delete(m_hAllocator, m_Blocks[i]);
6265  }
6266 }
6267 
6268 VkResult VmaBlockVector::CreateMinBlocks()
6269 {
6270  for(size_t i = 0; i < m_MinBlockCount; ++i)
6271  {
6272  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6273  if(res != VK_SUCCESS)
6274  {
6275  return res;
6276  }
6277  }
6278  return VK_SUCCESS;
6279 }
6280 
6281 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6282 {
6283  pStats->size = 0;
6284  pStats->unusedSize = 0;
6285  pStats->allocationCount = 0;
6286  pStats->unusedRangeCount = 0;
6287  pStats->unusedRangeSizeMax = 0;
6288 
6289  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6290 
6291  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6292  {
6293  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6294  VMA_ASSERT(pBlock);
6295  VMA_HEAVY_ASSERT(pBlock->Validate());
6296  pBlock->m_Metadata.AddPoolStats(*pStats);
6297  }
6298 }
6299 
6300 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6301 
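// Allocation strategy, in order: (1) try existing blocks without making anything
// lost; (2) create a new block - default pools may start at 1/8..1/2 of the
// preferred size and halve again up to NEW_BLOCK_SIZE_SHIFT_MAX times if
// vkAllocateMemory fails; (3) try existing blocks while making other allocations
// lost, retrying up to VMA_ALLOCATION_TRY_COUNT times.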
6302 VkResult VmaBlockVector::Allocate(
6303  VmaPool hCurrentPool,
6304  uint32_t currentFrameIndex,
6305  const VkMemoryRequirements& vkMemReq,
6306  const VmaAllocationCreateInfo& createInfo,
6307  VmaSuballocationType suballocType,
6308  VmaAllocation* pAllocation)
6309 {
6310  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6311  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6312 
6313  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6314 
6315  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6316  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6317  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6318  {
6319  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6320  VMA_ASSERT(pCurrBlock);
6321  VmaAllocationRequest currRequest = {};
6322  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6323  currentFrameIndex,
6324  m_FrameInUseCount,
6325  m_BufferImageGranularity,
6326  vkMemReq.size,
6327  vkMemReq.alignment,
6328  suballocType,
6329  false, // canMakeOtherLost
6330  &currRequest))
6331  {
6332  // Allocate from pCurrBlock.
6333  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6334 
6335  if(mapped)
6336  {
6337  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6338  if(res != VK_SUCCESS)
6339  {
6340  return res;
6341  }
6342  }
6343 
6344  // We no longer have an empty block.
6345  if(pCurrBlock->m_Metadata.IsEmpty())
6346  {
6347  m_HasEmptyBlock = false;
6348  }
6349 
6350  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6351  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6352  (*pAllocation)->InitBlockAllocation(
6353  hCurrentPool,
6354  pCurrBlock,
6355  currRequest.offset,
6356  vkMemReq.alignment,
6357  vkMemReq.size,
6358  suballocType,
6359  mapped,
6360  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6361  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6362  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6363  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6364  return VK_SUCCESS;
6365  }
6366  }
6367 
6368  const bool canCreateNewBlock =
6369  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6370  (m_Blocks.size() < m_MaxBlockCount);
6371 
6372  // 2. Try to create new block.
6373  if(canCreateNewBlock)
6374  {
6375  // Calculate optimal size for new block.
6376  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6377  uint32_t newBlockSizeShift = 0;
6378  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6379 
6380  // Allocating blocks of other sizes is allowed only in default pools.
6381  // In custom pools block size is fixed.
6382  if(m_IsCustomPool == false)
6383  {
6384  // Allocate 1/8, 1/4, 1/2 as first blocks.
6385  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6386  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6387  {
6388  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6389  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6390  {
6391  newBlockSize = smallerNewBlockSize;
6392  ++newBlockSizeShift;
6393  }
6394  else
6395  {
6396  break;
6397  }
6398  }
6399  }
6400 
6401  size_t newBlockIndex = 0;
6402  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6403  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6404  if(m_IsCustomPool == false)
6405  {
6406  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6407  {
6408  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6409  if(smallerNewBlockSize >= vkMemReq.size)
6410  {
6411  newBlockSize = smallerNewBlockSize;
6412  ++newBlockSizeShift;
6413  res = CreateBlock(newBlockSize, &newBlockIndex);
6414  }
6415  else
6416  {
6417  break;
6418  }
6419  }
6420  }
6421 
6422  if(res == VK_SUCCESS)
6423  {
6424  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6425  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6426 
6427  if(mapped)
6428  {
6429  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6430  if(res != VK_SUCCESS)
6431  {
6432  return res;
6433  }
6434  }
6435 
6436  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6437  VmaAllocationRequest allocRequest;
6438  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6439  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6440  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6441  (*pAllocation)->InitBlockAllocation(
6442  hCurrentPool,
6443  pBlock,
6444  allocRequest.offset,
6445  vkMemReq.alignment,
6446  vkMemReq.size,
6447  suballocType,
6448  mapped,
6449  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6450  VMA_HEAVY_ASSERT(pBlock->Validate());
6451  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
6452  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6453  return VK_SUCCESS;
6454  }
6455  }
6456 
6457  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6458 
6459  // 3. Try to allocate from existing blocks with making other allocations lost.
6460  if(canMakeOtherLost)
6461  {
6462  uint32_t tryIndex = 0;
6463  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6464  {
6465  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6466  VmaAllocationRequest bestRequest = {};
6467  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6468 
6469  // 1. Search existing allocations.
6470  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6471  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6472  {
6473  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6474  VMA_ASSERT(pCurrBlock);
6475  VmaAllocationRequest currRequest = {};
6476  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6477  currentFrameIndex,
6478  m_FrameInUseCount,
6479  m_BufferImageGranularity,
6480  vkMemReq.size,
6481  vkMemReq.alignment,
6482  suballocType,
6483  canMakeOtherLost,
6484  &currRequest))
6485  {
6486  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6487  if(pBestRequestBlock == VMA_NULL ||
6488  currRequestCost < bestRequestCost)
6489  {
6490  pBestRequestBlock = pCurrBlock;
6491  bestRequest = currRequest;
6492  bestRequestCost = currRequestCost;
6493 
6494  if(bestRequestCost == 0)
6495  {
6496  break;
6497  }
6498  }
6499  }
6500  }
6501 
6502  if(pBestRequestBlock != VMA_NULL)
6503  {
6504  if(mapped)
6505  {
6506  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6507  if(res != VK_SUCCESS)
6508  {
6509  return res;
6510  }
6511  }
6512 
6513  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6514  currentFrameIndex,
6515  m_FrameInUseCount,
6516  &bestRequest))
6517  {
6518  // We no longer have an empty block.
6519  if(pBestRequestBlock->m_Metadata.IsEmpty())
6520  {
6521  m_HasEmptyBlock = false;
6522  }
6523  // Allocate from this pBlock.
6524  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6525  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6526  (*pAllocation)->InitBlockAllocation(
6527  hCurrentPool,
6528  pBestRequestBlock,
6529  bestRequest.offset,
6530  vkMemReq.alignment,
6531  vkMemReq.size,
6532  suballocType,
6533  mapped,
6534  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6535  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6536  VMA_DEBUG_LOG(" Returned from existing block");
6537  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6538  return VK_SUCCESS;
6539  }
6540  // else: Some allocations must have been touched while we were here. Next try.
6541  }
6542  else
6543  {
6544  // Could not find place in any of the blocks - break outer loop.
6545  break;
6546  }
6547  }
6548  /* Maximum number of tries exceeded - a very unlikely event when many other
6549  threads are simultaneously touching allocations, making it impossible to mark
6550  them as lost while we try to allocate. */
6551  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6552  {
6553  return VK_ERROR_TOO_MANY_OBJECTS;
6554  }
6555  }
6556 
6557  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6558 }
6559 
6560 void VmaBlockVector::Free(
6561  VmaAllocation hAllocation)
6562 {
6563  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6564 
6565  // Scope for lock.
6566  {
6567  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6568 
6569  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6570 
6571  if(hAllocation->IsPersistentMap())
6572  {
6573  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6574  }
6575 
6576  pBlock->m_Metadata.Free(hAllocation);
6577  VMA_HEAVY_ASSERT(pBlock->Validate());
6578 
6579  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6580 
6581  // pBlock became empty after this deallocation.
6582  if(pBlock->m_Metadata.IsEmpty())
6583  {
6584  // We already have an empty block - we don't want two, so delete this one.
6585  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6586  {
6587  pBlockToDelete = pBlock;
6588  Remove(pBlock);
6589  }
6590  // We now have our first empty block.
6591  else
6592  {
6593  m_HasEmptyBlock = true;
6594  }
6595  }
6596  // pBlock didn't become empty, but we have another empty block - find and free that one.
6597  // (This is optional, heuristics.)
6598  else if(m_HasEmptyBlock)
6599  {
6600  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6601  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6602  {
6603  pBlockToDelete = pLastBlock;
6604  m_Blocks.pop_back();
6605  m_HasEmptyBlock = false;
6606  }
6607  }
6608 
6609  IncrementallySortBlocks();
6610  }
6611 
6612  // Destruction of a free block. Deferred until this point, outside of the mutex
6613  // lock, for performance reasons.
6614  if(pBlockToDelete != VMA_NULL)
6615  {
6616  VMA_DEBUG_LOG(" Deleted empty block");
6617  pBlockToDelete->Destroy(m_hAllocator);
6618  vma_delete(m_hAllocator, pBlockToDelete);
6619  }
6620 }
6621 
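// Returns the size of the largest existing block, early-exiting once it reaches
// m_PreferredBlockSize. Used above so that new blocks in default pools are not
// created smaller than blocks that already exist.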
6622 size_t VmaBlockVector::CalcMaxBlockSize() const
6623 {
6624  size_t result = 0;
6625  for(size_t i = m_Blocks.size(); i--; )
6626  {
6627  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6628  if(result >= m_PreferredBlockSize)
6629  {
6630  break;
6631  }
6632  }
6633  return result;
6634 }
6635 
6636 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6637 {
6638  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6639  {
6640  if(m_Blocks[blockIndex] == pBlock)
6641  {
6642  VmaVectorRemove(m_Blocks, blockIndex);
6643  return;
6644  }
6645  }
6646  VMA_ASSERT(0);
6647 }
6648 
6649 void VmaBlockVector::IncrementallySortBlocks()
6650 {
6651  // Bubble sort only until first swap.
6652  for(size_t i = 1; i < m_Blocks.size(); ++i)
6653  {
6654  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6655  {
6656  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6657  return;
6658  }
6659  }
6660 }
6661 
6662 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6663 {
6664  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6665  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6666  allocInfo.allocationSize = blockSize;
6667  VkDeviceMemory mem = VK_NULL_HANDLE;
6668  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6669  if(res < 0)
6670  {
6671  return res;
6672  }
6673 
6674  // New VkDeviceMemory successfully created.
6675 
6676  // Create a new block object for it.
6677  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6678  pBlock->Init(
6679  m_MemoryTypeIndex,
6680  mem,
6681  allocInfo.allocationSize);
6682 
6683  m_Blocks.push_back(pBlock);
6684  if(pNewBlockIndex != VMA_NULL)
6685  {
6686  *pNewBlockIndex = m_Blocks.size() - 1;
6687  }
6688 
6689  return VK_SUCCESS;
6690 }
6691 
6692 #if VMA_STATS_STRING_ENABLED
6693 
6694 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6695 {
6696  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6697 
6698  json.BeginObject();
6699 
6700  if(m_IsCustomPool)
6701  {
6702  json.WriteString("MemoryTypeIndex");
6703  json.WriteNumber(m_MemoryTypeIndex);
6704 
6705  json.WriteString("BlockSize");
6706  json.WriteNumber(m_PreferredBlockSize);
6707 
6708  json.WriteString("BlockCount");
6709  json.BeginObject(true);
6710  if(m_MinBlockCount > 0)
6711  {
6712  json.WriteString("Min");
6713  json.WriteNumber((uint64_t)m_MinBlockCount);
6714  }
6715  if(m_MaxBlockCount < SIZE_MAX)
6716  {
6717  json.WriteString("Max");
6718  json.WriteNumber((uint64_t)m_MaxBlockCount);
6719  }
6720  json.WriteString("Cur");
6721  json.WriteNumber((uint64_t)m_Blocks.size());
6722  json.EndObject();
6723 
6724  if(m_FrameInUseCount > 0)
6725  {
6726  json.WriteString("FrameInUseCount");
6727  json.WriteNumber(m_FrameInUseCount);
6728  }
6729  }
6730  else
6731  {
6732  json.WriteString("PreferredBlockSize");
6733  json.WriteNumber(m_PreferredBlockSize);
6734  }
6735 
6736  json.WriteString("Blocks");
6737  json.BeginArray();
6738  for(size_t i = 0; i < m_Blocks.size(); ++i)
6739  {
6740  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6741  }
6742  json.EndArray();
6743 
6744  json.EndObject();
6745 }
6746 
6747 #endif // #if VMA_STATS_STRING_ENABLED
6748 
6749 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6750  VmaAllocator hAllocator,
6751  uint32_t currentFrameIndex)
6752 {
6753  if(m_pDefragmentator == VMA_NULL)
6754  {
6755  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6756  hAllocator,
6757  this,
6758  currentFrameIndex);
6759  }
6760 
6761  return m_pDefragmentator;
6762 }
6763 
6764 VkResult VmaBlockVector::Defragment(
6765  VmaDefragmentationStats* pDefragmentationStats,
6766  VkDeviceSize& maxBytesToMove,
6767  uint32_t& maxAllocationsToMove)
6768 {
6769  if(m_pDefragmentator == VMA_NULL)
6770  {
6771  return VK_SUCCESS;
6772  }
6773 
6774  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6775 
6776  // Defragment.
6777  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6778 
6779  // Accumulate statistics.
6780  if(pDefragmentationStats != VMA_NULL)
6781  {
6782  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6783  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6784  pDefragmentationStats->bytesMoved += bytesMoved;
6785  pDefragmentationStats->allocationsMoved += allocationsMoved;
6786  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6787  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6788  maxBytesToMove -= bytesMoved;
6789  maxAllocationsToMove -= allocationsMoved;
6790  }
6791 
6792  // Free empty blocks.
6793  m_HasEmptyBlock = false;
6794  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6795  {
6796  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6797  if(pBlock->m_Metadata.IsEmpty())
6798  {
6799  if(m_Blocks.size() > m_MinBlockCount)
6800  {
6801  if(pDefragmentationStats != VMA_NULL)
6802  {
6803  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6804  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6805  }
6806 
6807  VmaVectorRemove(m_Blocks, blockIndex);
6808  pBlock->Destroy(m_hAllocator);
6809  vma_delete(m_hAllocator, pBlock);
6810  }
6811  else
6812  {
6813  m_HasEmptyBlock = true;
6814  }
6815  }
6816  }
6817 
6818  return result;
6819 }
6820 
6821 void VmaBlockVector::DestroyDefragmentator()
6822 {
6823  if(m_pDefragmentator != VMA_NULL)
6824  {
6825  vma_delete(m_hAllocator, m_pDefragmentator);
6826  m_pDefragmentator = VMA_NULL;
6827  }
6828 }
6829 
6830 void VmaBlockVector::MakePoolAllocationsLost(
6831  uint32_t currentFrameIndex,
6832  size_t* pLostAllocationCount)
6833 {
6834  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6835  size_t lostAllocationCount = 0;
6836  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6837  {
6838  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6839  VMA_ASSERT(pBlock);
6840  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6841  }
6842  if(pLostAllocationCount != VMA_NULL)
6843  {
6844  *pLostAllocationCount = lostAllocationCount;
6845  }
6846 }
6847 
6848 void VmaBlockVector::AddStats(VmaStats* pStats)
6849 {
6850  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6851  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6852 
6853  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6854 
6855  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6856  {
6857  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6858  VMA_ASSERT(pBlock);
6859  VMA_HEAVY_ASSERT(pBlock->Validate());
6860  VmaStatInfo allocationStatInfo;
6861  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6862  VmaAddStatInfo(pStats->total, allocationStatInfo);
6863  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6864  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6865  }
6866 }
6867 
6868 ////////////////////////////////////////////////////////////////////////////////
6869 // VmaDefragmentator members definition
6870 
6871 VmaDefragmentator::VmaDefragmentator(
6872  VmaAllocator hAllocator,
6873  VmaBlockVector* pBlockVector,
6874  uint32_t currentFrameIndex) :
6875  m_hAllocator(hAllocator),
6876  m_pBlockVector(pBlockVector),
6877  m_CurrentFrameIndex(currentFrameIndex),
6878  m_BytesMoved(0),
6879  m_AllocationsMoved(0),
6880  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6881  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6882 {
6883 }
6884 
6885 VmaDefragmentator::~VmaDefragmentator()
6886 {
6887  for(size_t i = m_Blocks.size(); i--; )
6888  {
6889  vma_delete(m_hAllocator, m_Blocks[i]);
6890  }
6891 }
6892 
6893 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6894 {
6895  AllocationInfo allocInfo;
6896  allocInfo.m_hAllocation = hAlloc;
6897  allocInfo.m_pChanged = pChanged;
6898  m_Allocations.push_back(allocInfo);
6899 }
6900 
6901 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6902 {
6903  // It has already been mapped for defragmentation.
6904  if(m_pMappedDataForDefragmentation)
6905  {
6906  *ppMappedData = m_pMappedDataForDefragmentation;
6907  return VK_SUCCESS;
6908  }
6909 
6910  // It is originally mapped.
6911  if(m_pBlock->m_Mapping.GetMappedData())
6912  {
6913  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6914  return VK_SUCCESS;
6915  }
6916 
6917  // Map on first usage.
6918  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6919  *ppMappedData = m_pMappedDataForDefragmentation;
6920  return res;
6921 }
6922 
6923 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6924 {
6925  if(m_pMappedDataForDefragmentation != VMA_NULL)
6926  {
6927  m_pBlock->Unmap(hAllocator, 1);
6928  }
6929 }
6930 
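// One defragmentation round: walks blocks from most "source" to most "destination"
// and tries to re-place each allocation earlier (per MoveMakesSense), copying its
// data through mapped pointers. Returns VK_INCOMPLETE once the byte or allocation
// budget is exhausted.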
6931 VkResult VmaDefragmentator::DefragmentRound(
6932  VkDeviceSize maxBytesToMove,
6933  uint32_t maxAllocationsToMove)
6934 {
6935  if(m_Blocks.empty())
6936  {
6937  return VK_SUCCESS;
6938  }
6939 
6940  size_t srcBlockIndex = m_Blocks.size() - 1;
6941  size_t srcAllocIndex = SIZE_MAX;
6942  for(;;)
6943  {
6944  // 1. Find next allocation to move.
6945  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6946  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6947  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6948  {
6949  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6950  {
6951  // Finished: no more allocations to process.
6952  if(srcBlockIndex == 0)
6953  {
6954  return VK_SUCCESS;
6955  }
6956  else
6957  {
6958  --srcBlockIndex;
6959  srcAllocIndex = SIZE_MAX;
6960  }
6961  }
6962  else
6963  {
6964  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6965  }
6966  }
6967 
6968  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6969  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6970 
6971  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6972  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6973  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6974  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6975 
6976  // 2. Try to find new place for this allocation in preceding or current block.
6977  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6978  {
6979  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6980  VmaAllocationRequest dstAllocRequest;
6981  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6982  m_CurrentFrameIndex,
6983  m_pBlockVector->GetFrameInUseCount(),
6984  m_pBlockVector->GetBufferImageGranularity(),
6985  size,
6986  alignment,
6987  suballocType,
6988  false, // canMakeOtherLost
6989  &dstAllocRequest) &&
6990  MoveMakesSense(
6991  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6992  {
6993  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6994 
6995  // Reached limit on number of allocations or bytes to move.
6996  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6997  (m_BytesMoved + size > maxBytesToMove))
6998  {
6999  return VK_INCOMPLETE;
7000  }
7001 
7002  void* pDstMappedData = VMA_NULL;
7003  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
7004  if(res != VK_SUCCESS)
7005  {
7006  return res;
7007  }
7008 
7009  void* pSrcMappedData = VMA_NULL;
7010  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7011  if(res != VK_SUCCESS)
7012  {
7013  return res;
7014  }
7015 
7016  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7017  memcpy(
7018  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7019  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7020  static_cast<size_t>(size));
7021 
7022  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7023  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7024 
7025  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7026 
7027  if(allocInfo.m_pChanged != VMA_NULL)
7028  {
7029  *allocInfo.m_pChanged = VK_TRUE;
7030  }
7031 
7032  ++m_AllocationsMoved;
7033  m_BytesMoved += size;
7034 
7035  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7036 
7037  break;
7038  }
7039  }
7040 
7041  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7042 
7043  if(srcAllocIndex > 0)
7044  {
7045  --srcAllocIndex;
7046  }
7047  else
7048  {
7049  if(srcBlockIndex > 0)
7050  {
7051  --srcBlockIndex;
7052  srcAllocIndex = SIZE_MAX;
7053  }
7054  else
7055  {
7056  return VK_SUCCESS;
7057  }
7058  }
7059  }
7060 }
7061 
7062 VkResult VmaDefragmentator::Defragment(
7063  VkDeviceSize maxBytesToMove,
7064  uint32_t maxAllocationsToMove)
7065 {
7066  if(m_Allocations.empty())
7067  {
7068  return VK_SUCCESS;
7069  }
7070 
7071  // Create block info for each block.
7072  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7073  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7074  {
7075  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7076  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7077  m_Blocks.push_back(pBlockInfo);
7078  }
7079 
7080  // Sort them by m_pBlock pointer value.
7081  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7082 
7083  // Move allocation infos from m_Allocations into the m_Allocations of the matching entry in m_Blocks.
7084  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
7085  {
7086  AllocationInfo& allocInfo = m_Allocations[allocIndex];
7087  // Now that we are under VmaBlockVector::m_Mutex, we can do a final check whether this allocation was lost.
7088  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7089  {
7090  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7091  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7092  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7093  {
7094  (*it)->m_Allocations.push_back(allocInfo);
7095  }
7096  else
7097  {
7098  VMA_ASSERT(0);
7099  }
7100  }
7101  }
7102  m_Allocations.clear();
7103 
7104  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7105  {
7106  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7107  pBlockInfo->CalcHasNonMovableAllocations();
7108  pBlockInfo->SortAllocationsBySizeDescecnding();
7109  }
7110 
7111  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7112  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7113 
7114  // Execute defragmentation rounds (the main part).
7115  VkResult result = VK_SUCCESS;
7116  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7117  {
7118  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7119  }
7120 
7121  // Unmap blocks that were mapped for defragmentation.
7122  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7123  {
7124  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7125  }
7126 
7127  return result;
7128 }
7129 
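// Compares (dstBlockIndex, dstOffset) to (srcBlockIndex, srcOffset) lexicographically:
// a move is worthwhile only if it shifts the allocation to an earlier block, or to a
// lower offset within the same block.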
7130 bool VmaDefragmentator::MoveMakesSense(
7131  size_t dstBlockIndex, VkDeviceSize dstOffset,
7132  size_t srcBlockIndex, VkDeviceSize srcOffset)
7133 {
7134  if(dstBlockIndex < srcBlockIndex)
7135  {
7136  return true;
7137  }
7138  if(dstBlockIndex > srcBlockIndex)
7139  {
7140  return false;
7141  }
7142  if(dstOffset < srcOffset)
7143  {
7144  return true;
7145  }
7146  return false;
7147 }
7148 
7149 ////////////////////////////////////////////////////////////////////////////////
7150 // VmaAllocator_T
7151 
7152 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7153  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7154  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7155  m_hDevice(pCreateInfo->device),
7156  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7157  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7158  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7159  m_PreferredLargeHeapBlockSize(0),
7160  m_PhysicalDevice(pCreateInfo->physicalDevice),
7161  m_CurrentFrameIndex(0),
7162  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7163 {
7164  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7165 
7166  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
7167  memset(&m_MemProps, 0, sizeof(m_MemProps));
7168  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7169 
7170  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7171  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7172 
7173  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7174  {
7175  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7176  }
7177 
7178  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7179  {
7180  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7181  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7182  }
7183 
7184  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7185 
7186  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7187  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7188 
7189  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7190  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7191 
7192  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7193  {
7194  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7195  {
7196  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7197  if(limit != VK_WHOLE_SIZE)
7198  {
7199  m_HeapSizeLimit[heapIndex] = limit;
7200  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7201  {
7202  m_MemProps.memoryHeaps[heapIndex].size = limit;
7203  }
7204  }
7205  }
7206  }
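// Note that besides recording the limit, the reported heap size itself is clamped,
// so CalcPreferredBlockSize below sees the limited size rather than the physical one.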
7207 
7208  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7209  {
7210  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7211 
7212  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7213  this,
7214  memTypeIndex,
7215  preferredBlockSize,
7216  0,
7217  SIZE_MAX,
7218  GetBufferImageGranularity(),
7219  pCreateInfo->frameInUseCount,
7220  false); // isCustomPool
7221  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7222  // because minBlockCount is 0.
7223  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7224  }
7225 }
7226 
7227 VmaAllocator_T::~VmaAllocator_T()
7228 {
7229  VMA_ASSERT(m_Pools.empty());
7230 
7231  for(size_t i = GetMemoryTypeCount(); i--; )
7232  {
7233  vma_delete(this, m_pDedicatedAllocations[i]);
7234  vma_delete(this, m_pBlockVectors[i]);
7235  }
7236 }
7237 
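// With VMA_STATIC_VULKAN_FUNCTIONS == 1, statically linked Vulkan entry points serve
// as defaults; any non-null pointers in pVulkanFunctions then override them, so the
// two sources can be mixed (e.g. supplying only the KHR extension functions).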
7238 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7239 {
7240 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7241  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7242  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7243  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7244  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7245  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7246  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7247  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7248  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7249  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7250  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7251  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7252  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7253  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7254  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7255  if(m_UseKhrDedicatedAllocation)
7256  {
7257  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7258  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7259  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7260  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7261  }
7262 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7263 
7264 #define VMA_COPY_IF_NOT_NULL(funcName) \
7265  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7266 
7267  if(pVulkanFunctions != VMA_NULL)
7268  {
7269  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7270  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7271  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7272  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7273  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7274  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7275  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7276  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7277  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7278  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7279  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7280  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7281  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7282  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7283  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7284  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7285  }
7286 
7287 #undef VMA_COPY_IF_NOT_NULL
7288 
7289  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7290  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7291  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7292  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7293  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7294  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7295  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7296  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7297  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7298  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7299  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7300  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7301  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7302  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7303  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7304  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7305  if(m_UseKhrDedicatedAllocation)
7306  {
7307  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7308  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7309  }
7310 }
7311 
7312 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7313 {
7314  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7315  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7316  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7317  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7318 }
7319 
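// A worked example of the heuristic above, assuming the library defaults of
// VMA_SMALL_HEAP_MAX_SIZE = 512 MiB and a preferred large-heap block size of
// 256 MiB: a 256 MiB heap counts as small and gets 256 / 8 = 32 MiB blocks,
// while an 8 GiB device-local heap is large and gets the full 256 MiB blocks.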
7320 VkResult VmaAllocator_T::AllocateMemoryOfType(
7321  const VkMemoryRequirements& vkMemReq,
7322  bool dedicatedAllocation,
7323  VkBuffer dedicatedBuffer,
7324  VkImage dedicatedImage,
7325  const VmaAllocationCreateInfo& createInfo,
7326  uint32_t memTypeIndex,
7327  VmaSuballocationType suballocType,
7328  VmaAllocation* pAllocation)
7329 {
7330  VMA_ASSERT(pAllocation != VMA_NULL);
7331  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7332 
7333  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7334 
7335  // If memory type is not HOST_VISIBLE, disable MAPPED.
7336  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7337  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7338  {
7339  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7340  }
7341 
7342  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7343  VMA_ASSERT(blockVector);
7344 
7345  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7346  bool preferDedicatedMemory =
7347  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7348  dedicatedAllocation ||
7349  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7350  vkMemReq.size > preferredBlockSize / 2;
7351 
7352  if(preferDedicatedMemory &&
7353  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7354  finalCreateInfo.pool == VK_NULL_HANDLE)
7355  {
7356  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7357  }
7358 
7359  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7360  {
7361  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7362  {
7363  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7364  }
7365  else
7366  {
7367  return AllocateDedicatedMemory(
7368  vkMemReq.size,
7369  suballocType,
7370  memTypeIndex,
7371  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7372  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7373  finalCreateInfo.pUserData,
7374  dedicatedBuffer,
7375  dedicatedImage,
7376  pAllocation);
7377  }
7378  }
7379  else
7380  {
7381  VkResult res = blockVector->Allocate(
7382  VK_NULL_HANDLE, // hCurrentPool
7383  m_CurrentFrameIndex.load(),
7384  vkMemReq,
7385  finalCreateInfo,
7386  suballocType,
7387  pAllocation);
7388  if(res == VK_SUCCESS)
7389  {
7390  return res;
7391  }
7392 
7393  // Try dedicated memory.
7394  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7395  {
7396  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7397  }
7398  else
7399  {
7400  res = AllocateDedicatedMemory(
7401  vkMemReq.size,
7402  suballocType,
7403  memTypeIndex,
7404  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7405  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7406  finalCreateInfo.pUserData,
7407  dedicatedBuffer,
7408  dedicatedImage,
7409  pAllocation);
7410  if(res == VK_SUCCESS)
7411  {
7412  // Succeeded: AllocateDedicatedMemory has already filled *pAllocation, nothing more to do here.
7413  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7414  return VK_SUCCESS;
7415  }
7416  else
7417  {
7418  // Everything failed: Return error code.
7419  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7420  return res;
7421  }
7422  }
7423  }
7424 }
7425 
7426 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7427  VkDeviceSize size,
7428  VmaSuballocationType suballocType,
7429  uint32_t memTypeIndex,
7430  bool map,
7431  bool isUserDataString,
7432  void* pUserData,
7433  VkBuffer dedicatedBuffer,
7434  VkImage dedicatedImage,
7435  VmaAllocation* pAllocation)
7436 {
7437  VMA_ASSERT(pAllocation);
7438 
7439  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7440  allocInfo.memoryTypeIndex = memTypeIndex;
7441  allocInfo.allocationSize = size;
7442 
7443  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7444  if(m_UseKhrDedicatedAllocation)
7445  {
7446  if(dedicatedBuffer != VK_NULL_HANDLE)
7447  {
7448  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7449  dedicatedAllocInfo.buffer = dedicatedBuffer;
7450  allocInfo.pNext = &dedicatedAllocInfo;
7451  }
7452  else if(dedicatedImage != VK_NULL_HANDLE)
7453  {
7454  dedicatedAllocInfo.image = dedicatedImage;
7455  allocInfo.pNext = &dedicatedAllocInfo;
7456  }
7457  }
7458 
7459  // Allocate VkDeviceMemory.
7460  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7461  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7462  if(res < 0)
7463  {
7464  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7465  return res;
7466  }
7467 
7468  void* pMappedData = VMA_NULL;
7469  if(map)
7470  {
7471  res = (*m_VulkanFunctions.vkMapMemory)(
7472  m_hDevice,
7473  hMemory,
7474  0,
7475  VK_WHOLE_SIZE,
7476  0,
7477  &pMappedData);
7478  if(res < 0)
7479  {
7480  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7481  FreeVulkanMemory(memTypeIndex, size, hMemory);
7482  return res;
7483  }
7484  }
7485 
7486  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7487  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7488  (*pAllocation)->SetUserData(this, pUserData);
7489 
7490  // Register it in m_pDedicatedAllocations.
7491  {
7492  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7493  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7494  VMA_ASSERT(pDedicatedAllocations);
7495  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7496  }
7497 
7498  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7499 
7500  return VK_SUCCESS;
7501 }
7502 
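// Resulting structure chain when VK_KHR_dedicated_allocation is in use and a
// dedicatedBuffer was passed (a sketch of the code above, not additional logic):
//
//   VkMemoryAllocateInfo{ allocationSize, memoryTypeIndex }
//     .pNext -> VkMemoryDedicatedAllocateInfoKHR{ buffer = dedicatedBuffer }
//
// which lets the driver dedicate the VkDeviceMemory to that one resource.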
7503 void VmaAllocator_T::GetBufferMemoryRequirements(
7504  VkBuffer hBuffer,
7505  VkMemoryRequirements& memReq,
7506  bool& requiresDedicatedAllocation,
7507  bool& prefersDedicatedAllocation) const
7508 {
7509  if(m_UseKhrDedicatedAllocation)
7510  {
7511  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7512  memReqInfo.buffer = hBuffer;
7513 
7514  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7515 
7516  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7517  memReq2.pNext = &memDedicatedReq;
7518 
7519  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7520 
7521  memReq = memReq2.memoryRequirements;
7522  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7523  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7524  }
7525  else
7526  {
7527  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7528  requiresDedicatedAllocation = false;
7529  prefersDedicatedAllocation = false;
7530  }
7531 }
7532 
7533 void VmaAllocator_T::GetImageMemoryRequirements(
7534  VkImage hImage,
7535  VkMemoryRequirements& memReq,
7536  bool& requiresDedicatedAllocation,
7537  bool& prefersDedicatedAllocation) const
7538 {
7539  if(m_UseKhrDedicatedAllocation)
7540  {
7541  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7542  memReqInfo.image = hImage;
7543 
7544  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7545 
7546  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7547  memReq2.pNext = &memDedicatedReq;
7548 
7549  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7550 
7551  memReq = memReq2.memoryRequirements;
7552  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7553  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7554  }
7555  else
7556  {
7557  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7558  requiresDedicatedAllocation = false;
7559  prefersDedicatedAllocation = false;
7560  }
7561 }
7562 
7563 VkResult VmaAllocator_T::AllocateMemory(
7564  const VkMemoryRequirements& vkMemReq,
7565  bool requiresDedicatedAllocation,
7566  bool prefersDedicatedAllocation,
7567  VkBuffer dedicatedBuffer,
7568  VkImage dedicatedImage,
7569  const VmaAllocationCreateInfo& createInfo,
7570  VmaSuballocationType suballocType,
7571  VmaAllocation* pAllocation)
7572 {
7573  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7574  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7575  {
7576  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7577  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7578  }
7579  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7580  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7581  {
7582  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7583  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7584  }
7585  if(requiresDedicatedAllocation)
7586  {
7587  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7588  {
7589  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7590  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7591  }
7592  if(createInfo.pool != VK_NULL_HANDLE)
7593  {
7594  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7595  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7596  }
7597  }
7598  if((createInfo.pool != VK_NULL_HANDLE) &&
7599  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7600  {
7601  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7602  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7603  }
7604 
7605  if(createInfo.pool != VK_NULL_HANDLE)
7606  {
7607  return createInfo.pool->m_BlockVector.Allocate(
7608  createInfo.pool,
7609  m_CurrentFrameIndex.load(),
7610  vkMemReq,
7611  createInfo,
7612  suballocType,
7613  pAllocation);
7614  }
7615  else
7616  {
7617  // Bit mask of Vulkan memory types acceptable for this allocation.
7618  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7619  uint32_t memTypeIndex = UINT32_MAX;
7620  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7621  if(res == VK_SUCCESS)
7622  {
7623  res = AllocateMemoryOfType(
7624  vkMemReq,
7625  requiresDedicatedAllocation || prefersDedicatedAllocation,
7626  dedicatedBuffer,
7627  dedicatedImage,
7628  createInfo,
7629  memTypeIndex,
7630  suballocType,
7631  pAllocation);
7632  // Succeeded on first try.
7633  if(res == VK_SUCCESS)
7634  {
7635  return res;
7636  }
7637  // Allocation from this memory type failed. Try other compatible memory types.
7638  else
7639  {
7640  for(;;)
7641  {
7642  // Remove old memTypeIndex from list of possibilities.
7643  memoryTypeBits &= ~(1u << memTypeIndex);
7644  // Find alternative memTypeIndex.
7645  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7646  if(res == VK_SUCCESS)
7647  {
7648  res = AllocateMemoryOfType(
7649  vkMemReq,
7650  requiresDedicatedAllocation || prefersDedicatedAllocation,
7651  dedicatedBuffer,
7652  dedicatedImage,
7653  createInfo,
7654  memTypeIndex,
7655  suballocType,
7656  pAllocation);
7657  // Allocation from this alternative memory type succeeded.
7658  if(res == VK_SUCCESS)
7659  {
7660  return res;
7661  }
7662  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7663  }
7664  // No other matching memory type index could be found.
7665  else
7666  {
7667  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7668  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7669  }
7670  }
7671  }
7672  }
7673  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7674  else
7675  return res;
7676  }
7677 }
7678 
7679 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7680 {
7681  VMA_ASSERT(allocation);
7682 
7683  if(allocation->CanBecomeLost() == false ||
7684  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7685  {
7686  switch(allocation->GetType())
7687  {
7688  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7689  {
7690  VmaBlockVector* pBlockVector = VMA_NULL;
7691  VmaPool hPool = allocation->GetPool();
7692  if(hPool != VK_NULL_HANDLE)
7693  {
7694  pBlockVector = &hPool->m_BlockVector;
7695  }
7696  else
7697  {
7698  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7699  pBlockVector = m_pBlockVectors[memTypeIndex];
7700  }
7701  pBlockVector->Free(allocation);
7702  }
7703  break;
7704  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7705  FreeDedicatedMemory(allocation);
7706  break;
7707  default:
7708  VMA_ASSERT(0);
7709  }
7710  }
7711 
7712  allocation->SetUserData(this, VMA_NULL);
7713  vma_delete(this, allocation);
7714 }
7715 
7716 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7717 {
7718  // Initialize.
7719  InitStatInfo(pStats->total);
7720  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7721  InitStatInfo(pStats->memoryType[i]);
7722  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7723  InitStatInfo(pStats->memoryHeap[i]);
7724 
7725  // Process default pools.
7726  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7727  {
7728  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7729  VMA_ASSERT(pBlockVector);
7730  pBlockVector->AddStats(pStats);
7731  }
7732 
7733  // Process custom pools.
7734  {
7735  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7736  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7737  {
7738  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7739  }
7740  }
7741 
7742  // Process dedicated allocations.
7743  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7744  {
7745  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7746  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7747  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7748  VMA_ASSERT(pDedicatedAllocVector);
7749  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7750  {
7751  VmaStatInfo allocationStatInfo;
7752  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7753  VmaAddStatInfo(pStats->total, allocationStatInfo);
7754  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7755  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7756  }
7757  }
7758 
7759  // Postprocess.
7760  VmaPostprocessCalcStatInfo(pStats->total);
7761  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7762  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7763  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7764  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7765 }
7766 
7767 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002
7768 
7769 VkResult VmaAllocator_T::Defragment(
7770  VmaAllocation* pAllocations,
7771  size_t allocationCount,
7772  VkBool32* pAllocationsChanged,
7773  const VmaDefragmentationInfo* pDefragmentationInfo,
7774  VmaDefragmentationStats* pDefragmentationStats)
7775 {
7776  if(pAllocationsChanged != VMA_NULL)
7777  {
7778  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
7779  }
7780  if(pDefragmentationStats != VMA_NULL)
7781  {
7782  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7783  }
7784 
7785  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7786 
7787  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7788 
7789  const size_t poolCount = m_Pools.size();
7790 
7791  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7792  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7793  {
7794  VmaAllocation hAlloc = pAllocations[allocIndex];
7795  VMA_ASSERT(hAlloc);
7796  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7797  // DedicatedAlloc cannot be defragmented.
7798  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7799  // Only HOST_VISIBLE memory types can be defragmented.
7800  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7801  // Lost allocation cannot be defragmented.
7802  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7803  {
7804  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7805 
7806  const VmaPool hAllocPool = hAlloc->GetPool();
7807  // This allocation belongs to custom pool.
7808  if(hAllocPool != VK_NULL_HANDLE)
7809  {
7810  pAllocBlockVector = &hAllocPool->GetBlockVector();
7811  }
7812  // This allocation belongs to general pool.
7813  else
7814  {
7815  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7816  }
7817 
7818  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7819 
7820  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7821  &pAllocationsChanged[allocIndex] : VMA_NULL;
7822  pDefragmentator->AddAllocation(hAlloc, pChanged);
7823  }
7824  }
7825 
7826  VkResult result = VK_SUCCESS;
7827 
7828  // ======== Main processing.
7829 
7830  VkDeviceSize maxBytesToMove = UINT64_MAX; // VkDeviceSize is 64-bit, so SIZE_MAX would be too small on 32-bit platforms.
7831  uint32_t maxAllocationsToMove = UINT32_MAX;
7832  if(pDefragmentationInfo != VMA_NULL)
7833  {
7834  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7835  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7836  }
7837 
7838  // Process standard memory.
7839  for(uint32_t memTypeIndex = 0;
7840  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7841  ++memTypeIndex)
7842  {
7843  // Only HOST_VISIBLE memory types can be defragmented.
7844  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7845  {
7846  result = m_pBlockVectors[memTypeIndex]->Defragment(
7847  pDefragmentationStats,
7848  maxBytesToMove,
7849  maxAllocationsToMove);
7850  }
7851  }
7852 
7853  // Process custom pools.
7854  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7855  {
7856  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7857  pDefragmentationStats,
7858  maxBytesToMove,
7859  maxAllocationsToMove);
7860  }
7861 
7862  // ======== Destroy defragmentators.
7863 
7864  // Process custom pools.
7865  for(size_t poolIndex = poolCount; poolIndex--; )
7866  {
7867  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7868  }
7869 
7870  // Process standard memory.
7871  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7872  {
7873  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7874  {
7875  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7876  }
7877  }
7878 
7879  return result;
7880 }
7881 
7882 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7883 {
7884  if(hAllocation->CanBecomeLost())
7885  {
7886  /*
7887  Warning: This is a carefully designed algorithm.
7888  Do not modify unless you really know what you're doing :)
7889  */
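 // In short: the loop below atomically bumps the allocation's last-use frame
 // index up to the current frame. A successful compare-exchange leads to the
 // "used in current frame" branch on the next iteration; a failed one has
 // already reloaded the latest value, which may meanwhile have become
 // VMA_FRAME_INDEX_LOST, so the loop simply dispatches again.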
7890  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7891  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7892  for(;;)
7893  {
7894  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7895  {
7896  pAllocationInfo->memoryType = UINT32_MAX;
7897  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7898  pAllocationInfo->offset = 0;
7899  pAllocationInfo->size = hAllocation->GetSize();
7900  pAllocationInfo->pMappedData = VMA_NULL;
7901  pAllocationInfo->pUserData = hAllocation->GetUserData();
7902  return;
7903  }
7904  else if(localLastUseFrameIndex == localCurrFrameIndex)
7905  {
7906  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7907  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7908  pAllocationInfo->offset = hAllocation->GetOffset();
7909  pAllocationInfo->size = hAllocation->GetSize();
7910  pAllocationInfo->pMappedData = VMA_NULL;
7911  pAllocationInfo->pUserData = hAllocation->GetUserData();
7912  return;
7913  }
7914  else // Last use time earlier than current time.
7915  {
7916  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7917  {
7918  localLastUseFrameIndex = localCurrFrameIndex;
7919  }
7920  }
7921  }
7922  }
7923  else
7924  {
7925  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7926  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7927  pAllocationInfo->offset = hAllocation->GetOffset();
7928  pAllocationInfo->size = hAllocation->GetSize();
7929  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7930  pAllocationInfo->pUserData = hAllocation->GetUserData();
7931  }
7932 }
7933 
7934 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7935 {
7936  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7937  if(hAllocation->CanBecomeLost())
7938  {
7939  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7940  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7941  for(;;)
7942  {
7943  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7944  {
7945  return false;
7946  }
7947  else if(localLastUseFrameIndex == localCurrFrameIndex)
7948  {
7949  return true;
7950  }
7951  else // Last use time earlier than current time.
7952  {
7953  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7954  {
7955  localLastUseFrameIndex = localCurrFrameIndex;
7956  }
7957  }
7958  }
7959  }
7960  else
7961  {
7962  return true;
7963  }
7964 }
7965 
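// Typical per-frame pattern for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (a sketch; frameIndex is assumed
// to be the application's own frame counter):
/*
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // Allocation was lost: free it and recreate the resource.
}
*/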
7966 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7967 {
7968  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7969 
7970  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7971 
7972  if(newCreateInfo.maxBlockCount == 0)
7973  {
7974  newCreateInfo.maxBlockCount = SIZE_MAX;
7975  }
7976  if(newCreateInfo.blockSize == 0)
7977  {
7978  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7979  }
7980 
7981  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7982 
7983  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7984  if(res != VK_SUCCESS)
7985  {
7986  vma_delete(this, *pPool);
7987  *pPool = VMA_NULL;
7988  return res;
7989  }
7990 
7991  // Add to m_Pools.
7992  {
7993  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7994  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7995  }
7996 
7997  return VK_SUCCESS;
7998 }
7999 
8000 void VmaAllocator_T::DestroyPool(VmaPool pool)
8001 {
8002  // Remove from m_Pools.
8003  {
8004  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8005  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8006  VMA_ASSERT(success && "Pool not found in Allocator.");
8007  }
8008 
8009  vma_delete(this, pool);
8010 }
8011 
8012 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8013 {
8014  pool->m_BlockVector.GetPoolStats(pPoolStats);
8015 }
8016 
8017 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8018 {
8019  m_CurrentFrameIndex.store(frameIndex);
8020 }
8021 
8022 void VmaAllocator_T::MakePoolAllocationsLost(
8023  VmaPool hPool,
8024  size_t* pLostAllocationCount)
8025 {
8026  hPool->m_BlockVector.MakePoolAllocationsLost(
8027  m_CurrentFrameIndex.load(),
8028  pLostAllocationCount);
8029 }
8030 
8031 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8032 {
8033  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8034  (*pAllocation)->InitLost();
8035 }
8036 
8037 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8038 {
8039  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8040 
8041  VkResult res;
8042  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8043  {
8044  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8045  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8046  {
8047  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8048  if(res == VK_SUCCESS)
8049  {
8050  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8051  }
8052  }
8053  else
8054  {
8055  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8056  }
8057  }
8058  else
8059  {
8060  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8061  }
8062 
8063  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8064  {
8065  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8066  }
8067 
8068  return res;
8069 }
8070 
8071 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8072 {
8073  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8074  {
8075  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8076  }
8077 
8078  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8079 
8080  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8081  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8082  {
8083  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8084  m_HeapSizeLimit[heapIndex] += size;
8085  }
8086 }
8087 
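// Example of the heap-limit bookkeeping implemented by the two functions
// above (a sketch, assuming the allocator was created with
// VmaAllocatorCreateInfo::pHeapSizeLimit capping heap 0 at 1 GiB):
// m_HeapSizeLimit[0] starts at 1 GiB, every successful allocation from that
// heap subtracts its size, FreeVulkanMemory adds it back, and a request
// larger than the remaining budget fails with VK_ERROR_OUT_OF_DEVICE_MEMORY
// before vkAllocateMemory is ever called.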
8088 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8089 {
8090  if(hAllocation->CanBecomeLost())
8091  {
8092  return VK_ERROR_MEMORY_MAP_FAILED;
8093  }
8094 
8095  switch(hAllocation->GetType())
8096  {
8097  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8098  {
8099  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8100  char *pBytes = VMA_NULL;
8101  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8102  if(res == VK_SUCCESS)
8103  {
8104  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8105  hAllocation->BlockAllocMap();
8106  }
8107  return res;
8108  }
8109  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8110  return hAllocation->DedicatedAllocMap(this, ppData);
8111  default:
8112  VMA_ASSERT(0);
8113  return VK_ERROR_MEMORY_MAP_FAILED;
8114  }
8115 }
8116 
8117 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8118 {
8119  switch(hAllocation->GetType())
8120  {
8121  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8122  {
8123  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8124  hAllocation->BlockAllocUnmap();
8125  pBlock->Unmap(this, 1);
8126  }
8127  break;
8128  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8129  hAllocation->DedicatedAllocUnmap(this);
8130  break;
8131  default:
8132  VMA_ASSERT(0);
8133  }
8134 }
8135 
8136 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8137 {
8138  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8139 
8140  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8141  {
8142  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8143  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8144  VMA_ASSERT(pDedicatedAllocations);
8145  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8146  VMA_ASSERT(success);
8147  }
8148 
8149  VkDeviceMemory hMemory = allocation->GetMemory();
8150 
8151  if(allocation->GetMappedData() != VMA_NULL)
8152  {
8153  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8154  }
8155 
8156  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8157 
8158  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8159 }
8160 
8161 #if VMA_STATS_STRING_ENABLED
8162 
8163 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8164 {
8165  bool dedicatedAllocationsStarted = false;
8166  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8167  {
8168  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8169  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8170  VMA_ASSERT(pDedicatedAllocVector);
8171  if(pDedicatedAllocVector->empty() == false)
8172  {
8173  if(dedicatedAllocationsStarted == false)
8174  {
8175  dedicatedAllocationsStarted = true;
8176  json.WriteString("DedicatedAllocations");
8177  json.BeginObject();
8178  }
8179 
8180  json.BeginString("Type ");
8181  json.ContinueString(memTypeIndex);
8182  json.EndString();
8183 
8184  json.BeginArray();
8185 
8186  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8187  {
8188  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8189  json.BeginObject(true);
8190 
8191  json.WriteString("Type");
8192  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8193 
8194  json.WriteString("Size");
8195  json.WriteNumber(hAlloc->GetSize());
8196 
8197  const void* pUserData = hAlloc->GetUserData();
8198  if(pUserData != VMA_NULL)
8199  {
8200  json.WriteString("UserData");
8201  if(hAlloc->IsUserDataString())
8202  {
8203  json.WriteString((const char*)pUserData);
8204  }
8205  else
8206  {
8207  json.BeginString();
8208  json.ContinueString_Pointer(pUserData);
8209  json.EndString();
8210  }
8211  }
8212 
8213  json.EndObject();
8214  }
8215 
8216  json.EndArray();
8217  }
8218  }
8219  if(dedicatedAllocationsStarted)
8220  {
8221  json.EndObject();
8222  }
8223 
8224  {
8225  bool allocationsStarted = false;
8226  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8227  {
8228  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8229  {
8230  if(allocationsStarted == false)
8231  {
8232  allocationsStarted = true;
8233  json.WriteString("DefaultPools");
8234  json.BeginObject();
8235  }
8236 
8237  json.BeginString("Type ");
8238  json.ContinueString(memTypeIndex);
8239  json.EndString();
8240 
8241  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8242  }
8243  }
8244  if(allocationsStarted)
8245  {
8246  json.EndObject();
8247  }
8248  }
8249 
8250  {
8251  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8252  const size_t poolCount = m_Pools.size();
8253  if(poolCount > 0)
8254  {
8255  json.WriteString("Pools");
8256  json.BeginArray();
8257  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8258  {
8259  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8260  }
8261  json.EndArray();
8262  }
8263  }
8264 }
8265 
8266 #endif // #if VMA_STATS_STRING_ENABLED
8267 
8268 static VkResult AllocateMemoryForImage(
8269  VmaAllocator allocator,
8270  VkImage image,
8271  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8272  VmaSuballocationType suballocType,
8273  VmaAllocation* pAllocation)
8274 {
8275  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8276 
8277  VkMemoryRequirements vkMemReq = {};
8278  bool requiresDedicatedAllocation = false;
8279  bool prefersDedicatedAllocation = false;
8280  allocator->GetImageMemoryRequirements(image, vkMemReq,
8281  requiresDedicatedAllocation, prefersDedicatedAllocation);
8282 
8283  return allocator->AllocateMemory(
8284  vkMemReq,
8285  requiresDedicatedAllocation,
8286  prefersDedicatedAllocation,
8287  VK_NULL_HANDLE, // dedicatedBuffer
8288  image, // dedicatedImage
8289  *pAllocationCreateInfo,
8290  suballocType,
8291  pAllocation);
8292 }
8293 
8294 ////////////////////////////////////////////////////////////////////////////////
8295 // Public interface
8296 
8297 VkResult vmaCreateAllocator(
8298  const VmaAllocatorCreateInfo* pCreateInfo,
8299  VmaAllocator* pAllocator)
8300 {
8301  VMA_ASSERT(pCreateInfo && pAllocator);
8302  VMA_DEBUG_LOG("vmaCreateAllocator");
8303  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8304  return VK_SUCCESS;
8305 }
8306 
8307 void vmaDestroyAllocator(
8308  VmaAllocator allocator)
8309 {
8310  if(allocator != VK_NULL_HANDLE)
8311  {
8312  VMA_DEBUG_LOG("vmaDestroyAllocator");
8313  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8314  vma_delete(&allocationCallbacks, allocator);
8315  }
8316 }
8317 
8318 void vmaGetPhysicalDeviceProperties(
8319  VmaAllocator allocator,
8320  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8321 {
8322  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8323  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8324 }
8325 
8326 void vmaGetMemoryProperties(
8327  VmaAllocator allocator,
8328  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8329 {
8330  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8331  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8332 }
8333 
8334 void vmaGetMemoryTypeProperties(
8335  VmaAllocator allocator,
8336  uint32_t memoryTypeIndex,
8337  VkMemoryPropertyFlags* pFlags)
8338 {
8339  VMA_ASSERT(allocator && pFlags);
8340  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8341  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8342 }
8343 
8344 void vmaSetCurrentFrameIndex(
8345  VmaAllocator allocator,
8346  uint32_t frameIndex)
8347 {
8348  VMA_ASSERT(allocator);
8349  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8350 
8351  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8352 
8353  allocator->SetCurrentFrameIndex(frameIndex);
8354 }
8355 
8356 void vmaCalculateStats(
8357  VmaAllocator allocator,
8358  VmaStats* pStats)
8359 {
8360  VMA_ASSERT(allocator && pStats);
8361  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8362  allocator->CalculateStats(pStats);
8363 }
8364 
8365 #if VMA_STATS_STRING_ENABLED
8366 
8367 void vmaBuildStatsString(
8368  VmaAllocator allocator,
8369  char** ppStatsString,
8370  VkBool32 detailedMap)
8371 {
8372  VMA_ASSERT(allocator && ppStatsString);
8373  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8374 
8375  VmaStringBuilder sb(allocator);
8376  {
8377  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8378  json.BeginObject();
8379 
8380  VmaStats stats;
8381  allocator->CalculateStats(&stats);
8382 
8383  json.WriteString("Total");
8384  VmaPrintStatInfo(json, stats.total);
8385 
8386  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8387  {
8388  json.BeginString("Heap ");
8389  json.ContinueString(heapIndex);
8390  json.EndString();
8391  json.BeginObject();
8392 
8393  json.WriteString("Size");
8394  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8395 
8396  json.WriteString("Flags");
8397  json.BeginArray(true);
8398  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8399  {
8400  json.WriteString("DEVICE_LOCAL");
8401  }
8402  json.EndArray();
8403 
8404  if(stats.memoryHeap[heapIndex].blockCount > 0)
8405  {
8406  json.WriteString("Stats");
8407  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8408  }
8409 
8410  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8411  {
8412  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8413  {
8414  json.BeginString("Type ");
8415  json.ContinueString(typeIndex);
8416  json.EndString();
8417 
8418  json.BeginObject();
8419 
8420  json.WriteString("Flags");
8421  json.BeginArray(true);
8422  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8423  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8424  {
8425  json.WriteString("DEVICE_LOCAL");
8426  }
8427  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8428  {
8429  json.WriteString("HOST_VISIBLE");
8430  }
8431  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8432  {
8433  json.WriteString("HOST_COHERENT");
8434  }
8435  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8436  {
8437  json.WriteString("HOST_CACHED");
8438  }
8439  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8440  {
8441  json.WriteString("LAZILY_ALLOCATED");
8442  }
8443  json.EndArray();
8444 
8445  if(stats.memoryType[typeIndex].blockCount > 0)
8446  {
8447  json.WriteString("Stats");
8448  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8449  }
8450 
8451  json.EndObject();
8452  }
8453  }
8454 
8455  json.EndObject();
8456  }
8457  if(detailedMap == VK_TRUE)
8458  {
8459  allocator->PrintDetailedMap(json);
8460  }
8461 
8462  json.EndObject();
8463  }
8464 
8465  const size_t len = sb.GetLength();
8466  char* const pChars = vma_new_array(allocator, char, len + 1);
8467  if(len > 0)
8468  {
8469  memcpy(pChars, sb.GetData(), len);
8470  }
8471  pChars[len] = '\0';
8472  *ppStatsString = pChars;
8473 }
8474 
8475 void vmaFreeStatsString(
8476  VmaAllocator allocator,
8477  char* pStatsString)
8478 {
8479  if(pStatsString != VMA_NULL)
8480  {
8481  VMA_ASSERT(allocator);
8482  size_t len = strlen(pStatsString);
8483  vma_delete_array(allocator, pStatsString, len + 1);
8484  }
8485 }
8486 
8487 #endif // #if VMA_STATS_STRING_ENABLED
8488 
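// Example usage of the statistics API above (a minimal sketch, assuming an
// initialized allocator and <cstdio> for printf):
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE => include detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/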
8489 /*
8490 This function is not protected by any mutex because it just reads immutable data.
8491 */
8492 VkResult vmaFindMemoryTypeIndex(
8493  VmaAllocator allocator,
8494  uint32_t memoryTypeBits,
8495  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8496  uint32_t* pMemoryTypeIndex)
8497 {
8498  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8499  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8500  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8501 
8502  if(pAllocationCreateInfo->memoryTypeBits != 0)
8503  {
8504  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8505  }
8506 
8507  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8508  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8509 
8510  // Convert usage to requiredFlags and preferredFlags.
8511  switch(pAllocationCreateInfo->usage)
8512  {
8513  case VMA_MEMORY_USAGE_UNKNOWN:
8514  break;
8515  case VMA_MEMORY_USAGE_GPU_ONLY:
8516  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8517  break;
8518  case VMA_MEMORY_USAGE_CPU_ONLY:
8519  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8520  break;
8521  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8522  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8523  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8524  break;
8525  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8526  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8527  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8528  break;
8529  default:
8530  break;
8531  }
8532 
8533  *pMemoryTypeIndex = UINT32_MAX;
8534  uint32_t minCost = UINT32_MAX;
8535  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8536  memTypeIndex < allocator->GetMemoryTypeCount();
8537  ++memTypeIndex, memTypeBit <<= 1)
8538  {
8539  // This memory type is acceptable according to memoryTypeBits bitmask.
8540  if((memTypeBit & memoryTypeBits) != 0)
8541  {
8542  const VkMemoryPropertyFlags currFlags =
8543  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8544  // This memory type contains requiredFlags.
8545  if((requiredFlags & ~currFlags) == 0)
8546  {
8547  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8548  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8549  // Remember memory type with lowest cost.
8550  if(currCost < minCost)
8551  {
8552  *pMemoryTypeIndex = memTypeIndex;
8553  if(currCost == 0)
8554  {
8555  return VK_SUCCESS;
8556  }
8557  minCost = currCost;
8558  }
8559  }
8560  }
8561  }
8562  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8563 }
8564 
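// Example usage (a minimal sketch; device and buffer are assumed to be a
// valid VkDevice and VkBuffer). With VMA_MEMORY_USAGE_CPU_TO_GPU,
// HOST_VISIBLE becomes required and DEVICE_LOCAL preferred, so a type that is
// only HOST_VISIBLE costs 1 (one missing preferred bit) while a
// HOST_VISIBLE | DEVICE_LOCAL type costs 0 and wins immediately.
/*
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(
    allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
// VK_SUCCESS => memTypeIndex holds the lowest-cost compatible memory type.
// VK_ERROR_FEATURE_NOT_PRESENT => no type satisfies the required flags.
*/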
8565 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8566  VmaAllocator allocator,
8567  const VkBufferCreateInfo* pBufferCreateInfo,
8568  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8569  uint32_t* pMemoryTypeIndex)
8570 {
8571  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8572  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8573  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8574  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8575 
8576  const VkDevice hDev = allocator->m_hDevice;
8577  VkBuffer hBuffer = VK_NULL_HANDLE;
8578  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8579  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8580  if(res == VK_SUCCESS)
8581  {
8582  VkMemoryRequirements memReq = {};
8583  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8584  hDev, hBuffer, &memReq);
8585 
8586  res = vmaFindMemoryTypeIndex(
8587  allocator,
8588  memReq.memoryTypeBits,
8589  pAllocationCreateInfo,
8590  pMemoryTypeIndex);
8591 
8592  allocator->GetVulkanFunctions().vkDestroyBuffer(
8593  hDev, hBuffer, allocator->GetAllocationCallbacks());
8594  }
8595  return res;
8596 }
8597 
8598 VkResult vmaFindMemoryTypeIndexForImageInfo(
8599  VmaAllocator allocator,
8600  const VkImageCreateInfo* pImageCreateInfo,
8601  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8602  uint32_t* pMemoryTypeIndex)
8603 {
8604  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8605  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8606  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8607  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8608 
8609  const VkDevice hDev = allocator->m_hDevice;
8610  VkImage hImage = VK_NULL_HANDLE;
8611  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8612  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8613  if(res == VK_SUCCESS)
8614  {
8615  VkMemoryRequirements memReq = {};
8616  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8617  hDev, hImage, &memReq);
8618 
8619  res = vmaFindMemoryTypeIndex(
8620  allocator,
8621  memReq.memoryTypeBits,
8622  pAllocationCreateInfo,
8623  pMemoryTypeIndex);
8624 
8625  allocator->GetVulkanFunctions().vkDestroyImage(
8626  hDev, hImage, allocator->GetAllocationCallbacks());
8627  }
8628  return res;
8629 }
8630 
8631 VkResult vmaCreatePool(
8632  VmaAllocator allocator,
8633  const VmaPoolCreateInfo* pCreateInfo,
8634  VmaPool* pPool)
8635 {
8636  VMA_ASSERT(allocator && pCreateInfo && pPool);
8637 
8638  VMA_DEBUG_LOG("vmaCreatePool");
8639 
8640  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8641 
8642  return allocator->CreatePool(pCreateInfo, pPool);
8643 }
8644 
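// Example usage (a sketch; memTypeIndex would typically come from
// vmaFindMemoryTypeIndex or one of its ForBufferInfo/ForImageInfo variants):
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 0;     // 0 => CalcPreferredBlockSize() default
poolCreateInfo.maxBlockCount = 0; // 0 => no limit (SIZE_MAX)

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate from it via VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
*/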
8645 void vmaDestroyPool(
8646  VmaAllocator allocator,
8647  VmaPool pool)
8648 {
8649  VMA_ASSERT(allocator);
8650 
8651  if(pool == VK_NULL_HANDLE)
8652  {
8653  return;
8654  }
8655 
8656  VMA_DEBUG_LOG("vmaDestroyPool");
8657 
8658  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8659 
8660  allocator->DestroyPool(pool);
8661 }
8662 
8663 void vmaGetPoolStats(
8664  VmaAllocator allocator,
8665  VmaPool pool,
8666  VmaPoolStats* pPoolStats)
8667 {
8668  VMA_ASSERT(allocator && pool && pPoolStats);
8669 
8670  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8671 
8672  allocator->GetPoolStats(pool, pPoolStats);
8673 }
8674 
8675 void vmaMakePoolAllocationsLost(
8676  VmaAllocator allocator,
8677  VmaPool pool,
8678  size_t* pLostAllocationCount)
8679 {
8680  VMA_ASSERT(allocator && pool);
8681 
8682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8683 
8684  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8685 }
8686 
8687 VkResult vmaAllocateMemory(
8688  VmaAllocator allocator,
8689  const VkMemoryRequirements* pVkMemoryRequirements,
8690  const VmaAllocationCreateInfo* pCreateInfo,
8691  VmaAllocation* pAllocation,
8692  VmaAllocationInfo* pAllocationInfo)
8693 {
8694  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8695 
8696  VMA_DEBUG_LOG("vmaAllocateMemory");
8697 
8698  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8699 
8700  VkResult result = allocator->AllocateMemory(
8701  *pVkMemoryRequirements,
8702  false, // requiresDedicatedAllocation
8703  false, // prefersDedicatedAllocation
8704  VK_NULL_HANDLE, // dedicatedBuffer
8705  VK_NULL_HANDLE, // dedicatedImage
8706  *pCreateInfo,
8707  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8708  pAllocation);
8709 
8710  if(pAllocationInfo && result == VK_SUCCESS)
8711  {
8712  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8713  }
8714 
8715  return result;
8716 }
8717 
8718 VkResult vmaAllocateMemoryForBuffer(
8719  VmaAllocator allocator,
8720  VkBuffer buffer,
8721  const VmaAllocationCreateInfo* pCreateInfo,
8722  VmaAllocation* pAllocation,
8723  VmaAllocationInfo* pAllocationInfo)
8724 {
8725  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8726 
8727  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8728 
8729  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8730 
8731  VkMemoryRequirements vkMemReq = {};
8732  bool requiresDedicatedAllocation = false;
8733  bool prefersDedicatedAllocation = false;
8734  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8735  requiresDedicatedAllocation,
8736  prefersDedicatedAllocation);
8737 
8738  VkResult result = allocator->AllocateMemory(
8739  vkMemReq,
8740  requiresDedicatedAllocation,
8741  prefersDedicatedAllocation,
8742  buffer, // dedicatedBuffer
8743  VK_NULL_HANDLE, // dedicatedImage
8744  *pCreateInfo,
8745  VMA_SUBALLOCATION_TYPE_BUFFER,
8746  pAllocation);
8747 
8748  if(pAllocationInfo && result == VK_SUCCESS)
8749  {
8750  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8751  }
8752 
8753  return result;
8754 }
8755 
8756 VkResult vmaAllocateMemoryForImage(
8757  VmaAllocator allocator,
8758  VkImage image,
8759  const VmaAllocationCreateInfo* pCreateInfo,
8760  VmaAllocation* pAllocation,
8761  VmaAllocationInfo* pAllocationInfo)
8762 {
8763  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8764 
8765  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8766 
8767  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8768 
8769  VkResult result = AllocateMemoryForImage(
8770  allocator,
8771  image,
8772  pCreateInfo,
8773  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8774  pAllocation);
8775 
8776  if(pAllocationInfo && result == VK_SUCCESS)
8777  {
8778  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8779  }
8780 
8781  return result;
8782 }
8783 
8784 void vmaFreeMemory(
8785  VmaAllocator allocator,
8786  VmaAllocation allocation)
8787 {
8788  VMA_ASSERT(allocator && allocation);
8789 
8790  VMA_DEBUG_LOG("vmaFreeMemory");
8791 
8792  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8793 
8794  allocator->FreeMemory(allocation);
8795 }
8796 
8797 void vmaGetAllocationInfo(
8798  VmaAllocator allocator,
8799  VmaAllocation allocation,
8800  VmaAllocationInfo* pAllocationInfo)
8801 {
8802  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8803 
8804  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8805 
8806  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8807 }
8808 
8809 VkBool32 vmaTouchAllocation(
8810  VmaAllocator allocator,
8811  VmaAllocation allocation)
8812 {
8813  VMA_ASSERT(allocator && allocation);
8814 
8815  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8816 
8817  return allocator->TouchAllocation(allocation);
8818 }
8819 
8820 void vmaSetAllocationUserData(
8821  VmaAllocator allocator,
8822  VmaAllocation allocation,
8823  void* pUserData)
8824 {
8825  VMA_ASSERT(allocator && allocation);
8826 
8827  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8828 
8829  allocation->SetUserData(allocator, pUserData);
8830 }
8831 
8832 void vmaCreateLostAllocation(
8833  VmaAllocator allocator,
8834  VmaAllocation* pAllocation)
8835 {
8836  VMA_ASSERT(allocator && pAllocation);
8837 
8838  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8839 
8840  allocator->CreateLostAllocation(pAllocation);
8841 }
8842 
8843 VkResult vmaMapMemory(
8844  VmaAllocator allocator,
8845  VmaAllocation allocation,
8846  void** ppData)
8847 {
8848  VMA_ASSERT(allocator && allocation && ppData);
8849 
8850  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8851 
8852  return allocator->Map(allocation, ppData);
8853 }
8854 
8855 void vmaUnmapMemory(
8856  VmaAllocator allocator,
8857  VmaAllocation allocation)
8858 {
8859  VMA_ASSERT(allocator && allocation);
8860 
8861  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8862 
8863  allocator->Unmap(allocation);
8864 }
8865 
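// Example usage (a sketch, assuming the allocation lives in HOST_VISIBLE and
// HOST_COHERENT memory; non-coherent memory would additionally need
// vkFlushMappedMemoryRanges after the write):
/*
void* pData = VMA_NULL;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, srcDataSize); // srcData/srcDataSize are assumed
    vmaUnmapMemory(allocator, allocation);
}
*/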
8866 VkResult vmaDefragment(
8867  VmaAllocator allocator,
8868  VmaAllocation* pAllocations,
8869  size_t allocationCount,
8870  VkBool32* pAllocationsChanged,
8871  const VmaDefragmentationInfo *pDefragmentationInfo,
8872  VmaDefragmentationStats* pDefragmentationStats)
8873 {
8874  VMA_ASSERT(allocator && pAllocations);
8875 
8876  VMA_DEBUG_LOG("vmaDefragment");
8877 
8878  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8879 
8880  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8881 }
8882 
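// Example usage (a sketch; allocations/allocCount are assumed to be the
// application's array of VmaAllocation handles). Per the implementation
// above, only non-lost block allocations in HOST_VISIBLE memory are moved:
/*
std::vector<VkBool32> allocationsChanged(allocCount); // needs <vector>
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(
    allocator, allocations, allocCount, allocationsChanged.data(),
    VMA_NULL, // null info => no limit on bytes or allocations moved
    &defragStats);
// Wherever allocationsChanged[i] == VK_TRUE, the allocation now has a new
// memory/offset: destroy and recreate any buffer or image bound to it.
*/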
8883 VkResult vmaCreateBuffer(
8884  VmaAllocator allocator,
8885  const VkBufferCreateInfo* pBufferCreateInfo,
8886  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8887  VkBuffer* pBuffer,
8888  VmaAllocation* pAllocation,
8889  VmaAllocationInfo* pAllocationInfo)
8890 {
8891  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8892 
8893  VMA_DEBUG_LOG("vmaCreateBuffer");
8894 
8895  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8896 
8897  *pBuffer = VK_NULL_HANDLE;
8898  *pAllocation = VK_NULL_HANDLE;
8899 
8900  // 1. Create VkBuffer.
8901  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8902  allocator->m_hDevice,
8903  pBufferCreateInfo,
8904  allocator->GetAllocationCallbacks(),
8905  pBuffer);
8906  if(res >= 0)
8907  {
8908  // 2. vkGetBufferMemoryRequirements.
8909  VkMemoryRequirements vkMemReq = {};
8910  bool requiresDedicatedAllocation = false;
8911  bool prefersDedicatedAllocation = false;
8912  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8913  requiresDedicatedAllocation, prefersDedicatedAllocation);
8914 
8915  // Make sure alignment requirements for specific buffer usages reported
8916  // in Physical Device Properties are included in alignment reported by memory requirements.
8917  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8918  {
8919  VMA_ASSERT(vkMemReq.alignment %
8920  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8921  }
8922  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8923  {
8924  VMA_ASSERT(vkMemReq.alignment %
8925  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8926  }
8927  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8928  {
8929  VMA_ASSERT(vkMemReq.alignment %
8930  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8931  }
8932 
8933  // 3. Allocate memory using allocator.
8934  res = allocator->AllocateMemory(
8935  vkMemReq,
8936  requiresDedicatedAllocation,
8937  prefersDedicatedAllocation,
8938  *pBuffer, // dedicatedBuffer
8939  VK_NULL_HANDLE, // dedicatedImage
8940  *pAllocationCreateInfo,
8941  VMA_SUBALLOCATION_TYPE_BUFFER,
8942  pAllocation);
8943  if(res >= 0)
8944  {
8945  // 4. Bind buffer with memory.
8946  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8947  allocator->m_hDevice,
8948  *pBuffer,
8949  (*pAllocation)->GetMemory(),
8950  (*pAllocation)->GetOffset());
8951  if(res >= 0)
8952  {
8953  // All steps succeeded.
8954  if(pAllocationInfo != VMA_NULL)
8955  {
8956  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8957  }
8958  return VK_SUCCESS;
8959  }
8960  allocator->FreeMemory(*pAllocation);
8961  *pAllocation = VK_NULL_HANDLE;
8962  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8963  *pBuffer = VK_NULL_HANDLE;
8964  return res;
8965  }
8966  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8967  *pBuffer = VK_NULL_HANDLE;
8968  return res;
8969  }
8970  return res;
8971 }
8972 
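// Example usage (a minimal sketch, assuming an initialized allocator):
// create a 64 KiB GPU-only vertex buffer and its memory in one call;
// vmaDestroyBuffer releases both.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(
    allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
// ...
vmaDestroyBuffer(allocator, buffer, allocation);
*/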
8973 void vmaDestroyBuffer(
8974  VmaAllocator allocator,
8975  VkBuffer buffer,
8976  VmaAllocation allocation)
8977 {
8978  if(buffer != VK_NULL_HANDLE)
8979  {
8980  VMA_ASSERT(allocator);
8981 
8982  VMA_DEBUG_LOG("vmaDestroyBuffer");
8983 
8984  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8985 
8986  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8987 
8988  allocator->FreeMemory(allocation);
8989  }
8990 }
8991 
8992 VkResult vmaCreateImage(
8993  VmaAllocator allocator,
8994  const VkImageCreateInfo* pImageCreateInfo,
8995  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8996  VkImage* pImage,
8997  VmaAllocation* pAllocation,
8998  VmaAllocationInfo* pAllocationInfo)
8999 {
9000  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
9001 
9002  VMA_DEBUG_LOG("vmaCreateImage");
9003 
9004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9005 
9006  *pImage = VK_NULL_HANDLE;
9007  *pAllocation = VK_NULL_HANDLE;
9008 
9009  // 1. Create VkImage.
9010  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9011  allocator->m_hDevice,
9012  pImageCreateInfo,
9013  allocator->GetAllocationCallbacks(),
9014  pImage);
9015  if(res >= 0)
9016  {
9017  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9018  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9019  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9020 
9021  // 2. Allocate memory using allocator.
9022  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9023  if(res >= 0)
9024  {
9025  // 3. Bind image with memory.
9026  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
9027  allocator->m_hDevice,
9028  *pImage,
9029  (*pAllocation)->GetMemory(),
9030  (*pAllocation)->GetOffset());
9031  if(res >= 0)
9032  {
9033  // All steps succeeded.
9034  if(pAllocationInfo != VMA_NULL)
9035  {
9036  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9037  }
9038  return VK_SUCCESS;
9039  }
9040  allocator->FreeMemory(*pAllocation);
9041  *pAllocation = VK_NULL_HANDLE;
9042  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9043  *pImage = VK_NULL_HANDLE;
9044  return res;
9045  }
9046  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9047  *pImage = VK_NULL_HANDLE;
9048  return res;
9049  }
9050  return res;
9051 }
9052 
9053 void vmaDestroyImage(
9054  VmaAllocator allocator,
9055  VkImage image,
9056  VmaAllocation allocation)
9057 {
9058  if(image != VK_NULL_HANDLE)
9059  {
9060  VMA_ASSERT(allocator);
9061 
9062  VMA_DEBUG_LOG("vmaDestroyImage");
9063 
9064  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9065 
9066  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9067 
9068  allocator->FreeMemory(allocation);
9069  }
9070 }
9071 
9072 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:1363
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1467
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1503
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1004
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1454
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1202
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1762
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1780
Definition: vk_mem_alloc.h:1241
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1350
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1019
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1170
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:954
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:975
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:980
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1782
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1337
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1513
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1014
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1153
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1462
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:967
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1311
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1166
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:971
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1457
Definition: vk_mem_alloc.h:1250
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1332
Definition: vk_mem_alloc.h:1323
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1156
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1016
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1475
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1050
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1506
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1321
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1356
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1088
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1172
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1291
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1165
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1025
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:969
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1024
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1489
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1597
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1044
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1165
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1162
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1494
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1578
Definition: vk_mem_alloc.h:1319
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1778
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1012
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1027
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1160
Definition: vk_mem_alloc.h:1207
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1447
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1158
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1022
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1026
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1278
Definition: vk_mem_alloc.h:1234
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1592
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1002
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1015
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1559
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1425
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1166
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1173
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1500
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1166
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1564