Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
943 #include <vulkan/vulkan.h>
944 
954 VK_DEFINE_HANDLE(VmaAllocator)
955 
956 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
958  VmaAllocator allocator,
959  uint32_t memoryType,
960  VkDeviceMemory memory,
961  VkDeviceSize size);
963 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
964  VmaAllocator allocator,
965  uint32_t memoryType,
966  VkDeviceMemory memory,
967  VkDeviceSize size);
968 
976 typedef struct VmaDeviceMemoryCallbacks {
982 
1012 
1015 typedef VkFlags VmaAllocatorCreateFlags;
1016 
1021 typedef struct VmaVulkanFunctions {
1022  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1023  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1024  PFN_vkAllocateMemory vkAllocateMemory;
1025  PFN_vkFreeMemory vkFreeMemory;
1026  PFN_vkMapMemory vkMapMemory;
1027  PFN_vkUnmapMemory vkUnmapMemory;
1028  PFN_vkBindBufferMemory vkBindBufferMemory;
1029  PFN_vkBindImageMemory vkBindImageMemory;
1030  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1031  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1032  PFN_vkCreateBuffer vkCreateBuffer;
1033  PFN_vkDestroyBuffer vkDestroyBuffer;
1034  PFN_vkCreateImage vkCreateImage;
1035  PFN_vkDestroyImage vkDestroyImage;
1036  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1037  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1039 
1042 {
1044  VmaAllocatorCreateFlags flags;
1046 
1047  VkPhysicalDevice physicalDevice;
1049 
1050  VkDevice device;
1052 
1055 
1056  const VkAllocationCallbacks* pAllocationCallbacks;
1058 
1097  const VkDeviceSize* pHeapSizeLimit;
1111 
1113 VkResult vmaCreateAllocator(
1114  const VmaAllocatorCreateInfo* pCreateInfo,
1115  VmaAllocator* pAllocator);
1116 
1118 void vmaDestroyAllocator(
1119  VmaAllocator allocator);
1120 
1126  VmaAllocator allocator,
1127  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1128 
1134  VmaAllocator allocator,
1135  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1136 
1144  VmaAllocator allocator,
1145  uint32_t memoryTypeIndex,
1146  VkMemoryPropertyFlags* pFlags);
1147 
1157  VmaAllocator allocator,
1158  uint32_t frameIndex);
1159 
1162 typedef struct VmaStatInfo
1163 {
1165  uint32_t blockCount;
1171  VkDeviceSize usedBytes;
1173  VkDeviceSize unusedBytes;
1174  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1175  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1176 } VmaStatInfo;
1177 
1179 typedef struct VmaStats
1180 {
1181  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1182  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1184 } VmaStats;
1185 
1187 void vmaCalculateStats(
1188  VmaAllocator allocator,
1189  VmaStats* pStats);
1190 
1191 #define VMA_STATS_STRING_ENABLED 1
1192 
1193 #if VMA_STATS_STRING_ENABLED
1194 
1196 
1198 void vmaBuildStatsString(
1199  VmaAllocator allocator,
1200  char** ppStatsString,
1201  VkBool32 detailedMap);
1202 
1203 void vmaFreeStatsString(
1204  VmaAllocator allocator,
1205  char* pStatsString);
1206 
1207 #endif // #if VMA_STATS_STRING_ENABLED
1208 
1217 VK_DEFINE_HANDLE(VmaPool)
1218 
1219 typedef enum VmaMemoryUsage
1220 {
1269 } VmaMemoryUsage;
1270 
1285 
1335 
1339 
1341 {
1343  VmaAllocationCreateFlags flags;
1354  VkMemoryPropertyFlags requiredFlags;
1359  VkMemoryPropertyFlags preferredFlags;
1367  uint32_t memoryTypeBits;
1380  void* pUserData;
1382 
1399 VkResult vmaFindMemoryTypeIndex(
1400  VmaAllocator allocator,
1401  uint32_t memoryTypeBits,
1402  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1403  uint32_t* pMemoryTypeIndex);
1404 
1418  VmaAllocator allocator,
1419  const VkBufferCreateInfo* pBufferCreateInfo,
1420  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1421  uint32_t* pMemoryTypeIndex);
1422 
1436  VmaAllocator allocator,
1437  const VkImageCreateInfo* pImageCreateInfo,
1438  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1439  uint32_t* pMemoryTypeIndex);
1440 
1461 
1464 typedef VkFlags VmaPoolCreateFlags;
1465 
1468 typedef struct VmaPoolCreateInfo {
1474  VmaPoolCreateFlags flags;
1479  VkDeviceSize blockSize;
1508 
1511 typedef struct VmaPoolStats {
1514  VkDeviceSize size;
1517  VkDeviceSize unusedSize;
1530  VkDeviceSize unusedRangeSizeMax;
1531 } VmaPoolStats;
1532 
1539 VkResult vmaCreatePool(
1540  VmaAllocator allocator,
1541  const VmaPoolCreateInfo* pCreateInfo,
1542  VmaPool* pPool);
1543 
1546 void vmaDestroyPool(
1547  VmaAllocator allocator,
1548  VmaPool pool);
1549 
1556 void vmaGetPoolStats(
1557  VmaAllocator allocator,
1558  VmaPool pool,
1559  VmaPoolStats* pPoolStats);
1560 
1568  VmaAllocator allocator,
1569  VmaPool pool,
1570  size_t* pLostAllocationCount);
1571 
1596 VK_DEFINE_HANDLE(VmaAllocation)
1597 
1598 
1600 typedef struct VmaAllocationInfo {
1605  uint32_t memoryType;
1614  VkDeviceMemory deviceMemory;
1619  VkDeviceSize offset;
1624  VkDeviceSize size;
1638  void* pUserData;
1640 
1651 VkResult vmaAllocateMemory(
1652  VmaAllocator allocator,
1653  const VkMemoryRequirements* pVkMemoryRequirements,
1654  const VmaAllocationCreateInfo* pCreateInfo,
1655  VmaAllocation* pAllocation,
1656  VmaAllocationInfo* pAllocationInfo);
1657 
1665  VmaAllocator allocator,
1666  VkBuffer buffer,
1667  const VmaAllocationCreateInfo* pCreateInfo,
1668  VmaAllocation* pAllocation,
1669  VmaAllocationInfo* pAllocationInfo);
1670 
1672 VkResult vmaAllocateMemoryForImage(
1673  VmaAllocator allocator,
1674  VkImage image,
1675  const VmaAllocationCreateInfo* pCreateInfo,
1676  VmaAllocation* pAllocation,
1677  VmaAllocationInfo* pAllocationInfo);
1678 
1680 void vmaFreeMemory(
1681  VmaAllocator allocator,
1682  VmaAllocation allocation);
1683 
1701  VmaAllocator allocator,
1702  VmaAllocation allocation,
1703  VmaAllocationInfo* pAllocationInfo);
1704 
1719 VkBool32 vmaTouchAllocation(
1720  VmaAllocator allocator,
1721  VmaAllocation allocation);
1722 
1737  VmaAllocator allocator,
1738  VmaAllocation allocation,
1739  void* pUserData);
1740 
1752  VmaAllocator allocator,
1753  VmaAllocation* pAllocation);
1754 
1789 VkResult vmaMapMemory(
1790  VmaAllocator allocator,
1791  VmaAllocation allocation,
1792  void** ppData);
1793 
1798 void vmaUnmapMemory(
1799  VmaAllocator allocator,
1800  VmaAllocation allocation);
1801 
1803 typedef struct VmaDefragmentationInfo {
1808  VkDeviceSize maxBytesToMove;
1815 
1817 typedef struct VmaDefragmentationStats {
1819  VkDeviceSize bytesMoved;
1821  VkDeviceSize bytesFreed;
1827 
1910 VkResult vmaDefragment(
1911  VmaAllocator allocator,
1912  VmaAllocation* pAllocations,
1913  size_t allocationCount,
1914  VkBool32* pAllocationsChanged,
1915  const VmaDefragmentationInfo *pDefragmentationInfo,
1916  VmaDefragmentationStats* pDefragmentationStats);
1917 
1944 VkResult vmaCreateBuffer(
1945  VmaAllocator allocator,
1946  const VkBufferCreateInfo* pBufferCreateInfo,
1947  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1948  VkBuffer* pBuffer,
1949  VmaAllocation* pAllocation,
1950  VmaAllocationInfo* pAllocationInfo);
1951 
1963 void vmaDestroyBuffer(
1964  VmaAllocator allocator,
1965  VkBuffer buffer,
1966  VmaAllocation allocation);
1967 
1969 VkResult vmaCreateImage(
1970  VmaAllocator allocator,
1971  const VkImageCreateInfo* pImageCreateInfo,
1972  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1973  VkImage* pImage,
1974  VmaAllocation* pAllocation,
1975  VmaAllocationInfo* pAllocationInfo);
1976 
1988 void vmaDestroyImage(
1989  VmaAllocator allocator,
1990  VkImage image,
1991  VmaAllocation allocation);
1992 
1993 #ifdef __cplusplus
1994 }
1995 #endif
1996 
1997 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1998 
1999 // For Visual Studio IntelliSense.
2000 #ifdef __INTELLISENSE__
2001 #define VMA_IMPLEMENTATION
2002 #endif
2003 
2004 #ifdef VMA_IMPLEMENTATION
2005 #undef VMA_IMPLEMENTATION
2006 
2007 #include <cstdint>
2008 #include <cstdlib>
2009 #include <cstring>
2010 
2011 /*******************************************************************************
2012 CONFIGURATION SECTION
2013 
2014 Define some of these macros before each #include of this header or change them
2015 here if you need other then default behavior depending on your environment.
2016 */
2017 
2018 /*
2019 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2020 internally, like:
2021 
2022  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2023 
2024 Define to 0 if you are going to provide you own pointers to Vulkan functions via
2025 VmaAllocatorCreateInfo::pVulkanFunctions.
2026 */
2027 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2028 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2029 #endif
2030 
2031 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2032 //#define VMA_USE_STL_CONTAINERS 1
2033 
2034 /* Set this macro to 1 to make the library including and using STL containers:
2035 std::pair, std::vector, std::list, std::unordered_map.
2036 
2037 Set it to 0 or undefined to make the library using its own implementation of
2038 the containers.
2039 */
2040 #if VMA_USE_STL_CONTAINERS
2041  #define VMA_USE_STL_VECTOR 1
2042  #define VMA_USE_STL_UNORDERED_MAP 1
2043  #define VMA_USE_STL_LIST 1
2044 #endif
2045 
2046 #if VMA_USE_STL_VECTOR
2047  #include <vector>
2048 #endif
2049 
2050 #if VMA_USE_STL_UNORDERED_MAP
2051  #include <unordered_map>
2052 #endif
2053 
2054 #if VMA_USE_STL_LIST
2055  #include <list>
2056 #endif
2057 
2058 /*
2059 Following headers are used in this CONFIGURATION section only, so feel free to
2060 remove them if not needed.
2061 */
2062 #include <cassert> // for assert
2063 #include <algorithm> // for min, max
2064 #include <mutex> // for std::mutex
2065 #include <atomic> // for std::atomic
2066 
2067 #if !defined(_WIN32) && !defined(__APPLE__)
2068  #include <malloc.h> // for aligned_alloc()
2069 #endif
2070 
2071 #ifndef VMA_NULL
2072  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2073  #define VMA_NULL nullptr
2074 #endif
2075 
2076 #if defined(__APPLE__) || defined(__ANDROID__)
2077 #include <cstdlib>
// Fallback implementation of C11 aligned_alloc() for platforms that lack it
// (this definition is compiled only under __APPLE__ / __ANDROID__, see the
// surrounding #if). Returns storage aligned to `alignment`, or VMA_NULL on failure.
void *aligned_alloc(size_t alignment, size_t size)
{
    // posix_memalign() requires alignment to be at least sizeof(void*)
    // (and a power of two — assumed here; callers pass power-of-two values).
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    // posix_memalign returns 0 on success; `pointer` is unspecified on failure.
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
2091 #endif
2092 
2093 // Normal assert to check for programmer's errors, especially in Debug configuration.
2094 #ifndef VMA_ASSERT
2095  #ifdef _DEBUG
2096  #define VMA_ASSERT(expr) assert(expr)
2097  #else
2098  #define VMA_ASSERT(expr)
2099  #endif
2100 #endif
2101 
2102 // Assert that will be called very often, like inside data structures e.g. operator[].
2103 // Making it non-empty can make program slow.
2104 #ifndef VMA_HEAVY_ASSERT
2105  #ifdef _DEBUG
2106  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2107  #else
2108  #define VMA_HEAVY_ASSERT(expr)
2109  #endif
2110 #endif
2111 
2112 #ifndef VMA_ALIGN_OF
2113  #define VMA_ALIGN_OF(type) (__alignof(type))
2114 #endif
2115 
2116 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2117  #if defined(_WIN32)
2118  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2119  #else
2120  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2121  #endif
2122 #endif
2123 
2124 #ifndef VMA_SYSTEM_FREE
2125  #if defined(_WIN32)
2126  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2127  #else
2128  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2129  #endif
2130 #endif
2131 
2132 #ifndef VMA_MIN
2133  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2134 #endif
2135 
2136 #ifndef VMA_MAX
2137  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2138 #endif
2139 
2140 #ifndef VMA_SWAP
2141  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2142 #endif
2143 
2144 #ifndef VMA_SORT
2145  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2146 #endif
2147 
2148 #ifndef VMA_DEBUG_LOG
2149  #define VMA_DEBUG_LOG(format, ...)
2150  /*
2151  #define VMA_DEBUG_LOG(format, ...) do { \
2152  printf(format, __VA_ARGS__); \
2153  printf("\n"); \
2154  } while(false)
2155  */
2156 #endif
2157 
2158 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2159 #if VMA_STATS_STRING_ENABLED
2160  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2161  {
2162  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2163  }
2164  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2165  {
2166  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2167  }
2168  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2169  {
2170  snprintf(outStr, strLen, "%p", ptr);
2171  }
2172 #endif
2173 
2174 #ifndef VMA_MUTEX
2175  class VmaMutex
2176  {
2177  public:
2178  VmaMutex() { }
2179  ~VmaMutex() { }
2180  void Lock() { m_Mutex.lock(); }
2181  void Unlock() { m_Mutex.unlock(); }
2182  private:
2183  std::mutex m_Mutex;
2184  };
2185  #define VMA_MUTEX VmaMutex
2186 #endif
2187 
2188 /*
2189 If providing your own implementation, you need to implement a subset of std::atomic:
2190 
2191 - Constructor(uint32_t desired)
2192 - uint32_t load() const
2193 - void store(uint32_t desired)
2194 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2195 */
2196 #ifndef VMA_ATOMIC_UINT32
2197  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2198 #endif
2199 
2200 #ifndef VMA_BEST_FIT
2201 
2213  #define VMA_BEST_FIT (1)
2214 #endif
2215 
2216 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2217 
2221  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2222 #endif
2223 
2224 #ifndef VMA_DEBUG_ALIGNMENT
2225 
2229  #define VMA_DEBUG_ALIGNMENT (1)
2230 #endif
2231 
2232 #ifndef VMA_DEBUG_MARGIN
2233 
2237  #define VMA_DEBUG_MARGIN (0)
2238 #endif
2239 
2240 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2241 
2245  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2246 #endif
2247 
2248 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2249 
2253  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2254 #endif
2255 
2256 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2257  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2259 #endif
2260 
2261 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2262  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2264 #endif
2265 
2266 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2267 
2268 /*******************************************************************************
2269 END OF CONFIGURATION
2270 */
2271 
// All-null VkAllocationCallbacks used as a default when the user provides no
// custom CPU allocation callbacks.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2274 
2275 // Returns number of bits set to 1 in (v).
// Returns number of bits set to 1 in (v).
// Uses Kernighan's method: each iteration clears the lowest set bit,
// so the loop runs once per set bit.
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1; // clear the lowest set bit
        ++count;
    }
    return count;
}
2285 
2286 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
2287 // Use types like uint32_t, uint64_t as T.
// Aligns given value up to nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use unsigned integer types like uint32_t, uint64_t as T; align must be > 0.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T remainder = val % align;
    return (remainder != 0) ? (val + (align - remainder)) : val;
}
2293 
2294 // Division with mathematical rounding to nearest number.
// Division with mathematical rounding to nearest number.
// Use unsigned integer types as T; y must be > 0.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    const T halfDivisor = y / (T)2;
    return (x + halfDivisor) / y;
}
2300 
2301 #ifndef VMA_SORT
2302 
// Lomuto-style partition used by VmaQuickSort below.
// Partitions [beg, end) around the pivot *(end - 1): after the call, elements
// for which cmp(elem, pivot) is true precede the returned iterator, the pivot
// itself sits at the returned position, and the remaining elements follow it.
// Relies on the VMA_SWAP macro; the sort built on this is not stable.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // pivot = last element of the range
    Iterator insertIndex = beg;                // next slot for a "less than pivot" element
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final sorted position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
2325 
// Recursive quicksort over [beg, end) using VmaQuickSortPartition above.
// Serves as the fallback implementation behind the VMA_SORT macro.
// cmp(a, b) must return true when a should be ordered before b.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);     // sort the part before the pivot
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp); // sort the part after the pivot
    }
}
2336 
2337 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2338 
2339 #endif // #ifndef VMA_SORT
2340 
2341 /*
2342 Returns true if two memory blocks occupy overlapping pages.
2343 ResourceA must be in less memory offset than ResourceB.
2344 
2345 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2346 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2347 */
2348 static inline bool VmaBlocksOnSamePage(
2349  VkDeviceSize resourceAOffset,
2350  VkDeviceSize resourceASize,
2351  VkDeviceSize resourceBOffset,
2352  VkDeviceSize pageSize)
2353 {
2354  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2355  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2356  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2357  VkDeviceSize resourceBStart = resourceBOffset;
2358  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2359  return resourceAEndPage == resourceBStartPage;
2360 }
2361 
2362 enum VmaSuballocationType
2363 {
2364  VMA_SUBALLOCATION_TYPE_FREE = 0,
2365  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2366  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2367  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2368  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2369  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2370  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2371 };
2372 
2373 /*
2374 Returns true if given suballocation types could conflict and must respect
2375 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2376 or linear image and another one is optimal image. If type is unknown, behave
2377 conservatively.
2378 */
2379 static inline bool VmaIsBufferImageGranularityConflict(
2380  VmaSuballocationType suballocType1,
2381  VmaSuballocationType suballocType2)
2382 {
2383  if(suballocType1 > suballocType2)
2384  {
2385  VMA_SWAP(suballocType1, suballocType2);
2386  }
2387 
2388  switch(suballocType1)
2389  {
2390  case VMA_SUBALLOCATION_TYPE_FREE:
2391  return false;
2392  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2393  return true;
2394  case VMA_SUBALLOCATION_TYPE_BUFFER:
2395  return
2396  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2397  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2398  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2399  return
2400  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2401  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2402  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2403  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2404  return
2405  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2406  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2407  return false;
2408  default:
2409  VMA_ASSERT(0);
2410  return true;
2411  }
2412 }
2413 
2414 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2415 struct VmaMutexLock
2416 {
2417 public:
2418  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2419  m_pMutex(useMutex ? &mutex : VMA_NULL)
2420  {
2421  if(m_pMutex)
2422  {
2423  m_pMutex->Lock();
2424  }
2425  }
2426 
2427  ~VmaMutexLock()
2428  {
2429  if(m_pMutex)
2430  {
2431  m_pMutex->Unlock();
2432  }
2433  }
2434 
2435 private:
2436  VMA_MUTEX* m_pMutex;
2437 };
2438 
2439 #if VMA_DEBUG_GLOBAL_MUTEX
2440  static VMA_MUTEX gDebugGlobalMutex;
2441  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2442 #else
2443  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2444 #endif
2445 
2446 // Minimum size of a free suballocation to register it in the free suballocation collection.
2447 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2448 
2449 /*
2450 Performs binary search and returns iterator to first element that is greater or
2451 equal to (key), according to comparison (cmp).
2452 
2453 Cmp should return true if first argument is less than second argument.
2454 
2455 Returned value is the found element, if present in the collection or place where
2456 new element with value (key) should be inserted.
2457 */
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    // Classic lower-bound search: shrink the candidate range until one
    // position remains.
    IterT lo = beg;
    size_t remaining = (size_t)(end - beg);
    while(remaining > 0)
    {
        const size_t half = remaining / 2;
        IterT probe = lo + half;
        if(cmp(*probe, key))
        {
            // *probe < key: the answer lies strictly after probe.
            lo = probe + 1;
            remaining -= half + 1;
        }
        else
        {
            // *probe >= key: probe may be the answer; discard the upper part.
            remaining = half;
        }
    }
    return lo;
}
2476 
2478 // Memory allocation
2479 
2480 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2481 {
2482  if((pAllocationCallbacks != VMA_NULL) &&
2483  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2484  {
2485  return (*pAllocationCallbacks->pfnAllocation)(
2486  pAllocationCallbacks->pUserData,
2487  size,
2488  alignment,
2489  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2490  }
2491  else
2492  {
2493  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2494  }
2495 }
2496 
2497 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2498 {
2499  if((pAllocationCallbacks != VMA_NULL) &&
2500  (pAllocationCallbacks->pfnFree != VMA_NULL))
2501  {
2502  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2503  }
2504  else
2505  {
2506  VMA_SYSTEM_FREE(ptr);
2507  }
2508 }
2509 
2510 template<typename T>
2511 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2512 {
2513  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2514 }
2515 
2516 template<typename T>
2517 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2518 {
2519  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2520 }
2521 
// Placement-new construction on VmaAllocate storage: allocates room for one
// `type` through the given VkAllocationCallbacks and default-constructs it.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Same for an array of (count) elements; pair with vma_delete_array.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2525 
2526 template<typename T>
2527 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2528 {
2529  ptr->~T();
2530  VmaFree(pAllocationCallbacks, ptr);
2531 }
2532 
2533 template<typename T>
2534 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2535 {
2536  if(ptr != VMA_NULL)
2537  {
2538  for(size_t i = count; i--; )
2539  {
2540  ptr[i].~T();
2541  }
2542  VmaFree(pAllocationCallbacks, ptr);
2543  }
2544 }
2545 
2546 // STL-compatible allocator.
// STL-compatible allocator that routes all allocations through the
// VkAllocationCallbacks stored in m_pCallbacks (may be null, which falls back
// to the system allocator inside VmaMalloc/VmaFree). Implements the minimal
// C++11 Allocator requirements this library's containers use.
template<typename T>
class VmaStlAllocator
{
public:
    // Allocation hooks; fixed for the allocator's lifetime (const member).
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by allocator-aware containers.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    // Raw storage for n objects of T; constructors are NOT run.
    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callbacks, so memory
    // allocated by one can be freed by any equal-comparing copy.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // Non-assignable: m_pCallbacks is const.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
2573 
2574 #if VMA_USE_STL_VECTOR
2575 
2576 #define VmaVector std::vector
2577 
// Inserts item at position index of an std::vector — shim matching the
// interface of the custom VmaVector below, so generic code works with both.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.insert(pos, item);
}
2583 
// Removes the element at position index of an std::vector — shim matching the
// interface of the custom VmaVector below.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.erase(pos);
}
2589 
2590 #else // #if VMA_USE_STL_VECTOR
2591 
2592 /* Class with interface compatible with subset of std::vector.
2593 T must be POD because constructors and destructors are not called and memcpy is
2594 used for these objects. */
2595 template<typename T, typename AllocatorT>
2596 class VmaVector
2597 {
2598 public:
2599  typedef T value_type;
2600 
2601  VmaVector(const AllocatorT& allocator) :
2602  m_Allocator(allocator),
2603  m_pArray(VMA_NULL),
2604  m_Count(0),
2605  m_Capacity(0)
2606  {
2607  }
2608 
2609  VmaVector(size_t count, const AllocatorT& allocator) :
2610  m_Allocator(allocator),
2611  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2612  m_Count(count),
2613  m_Capacity(count)
2614  {
2615  }
2616 
2617  VmaVector(const VmaVector<T, AllocatorT>& src) :
2618  m_Allocator(src.m_Allocator),
2619  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2620  m_Count(src.m_Count),
2621  m_Capacity(src.m_Count)
2622  {
2623  if(m_Count != 0)
2624  {
2625  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2626  }
2627  }
2628 
2629  ~VmaVector()
2630  {
2631  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2632  }
2633 
2634  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2635  {
2636  if(&rhs != this)
2637  {
2638  resize(rhs.m_Count);
2639  if(m_Count != 0)
2640  {
2641  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2642  }
2643  }
2644  return *this;
2645  }
2646 
2647  bool empty() const { return m_Count == 0; }
2648  size_t size() const { return m_Count; }
2649  T* data() { return m_pArray; }
2650  const T* data() const { return m_pArray; }
2651 
2652  T& operator[](size_t index)
2653  {
2654  VMA_HEAVY_ASSERT(index < m_Count);
2655  return m_pArray[index];
2656  }
2657  const T& operator[](size_t index) const
2658  {
2659  VMA_HEAVY_ASSERT(index < m_Count);
2660  return m_pArray[index];
2661  }
2662 
2663  T& front()
2664  {
2665  VMA_HEAVY_ASSERT(m_Count > 0);
2666  return m_pArray[0];
2667  }
2668  const T& front() const
2669  {
2670  VMA_HEAVY_ASSERT(m_Count > 0);
2671  return m_pArray[0];
2672  }
2673  T& back()
2674  {
2675  VMA_HEAVY_ASSERT(m_Count > 0);
2676  return m_pArray[m_Count - 1];
2677  }
2678  const T& back() const
2679  {
2680  VMA_HEAVY_ASSERT(m_Count > 0);
2681  return m_pArray[m_Count - 1];
2682  }
2683 
2684  void reserve(size_t newCapacity, bool freeMemory = false)
2685  {
2686  newCapacity = VMA_MAX(newCapacity, m_Count);
2687 
2688  if((newCapacity < m_Capacity) && !freeMemory)
2689  {
2690  newCapacity = m_Capacity;
2691  }
2692 
2693  if(newCapacity != m_Capacity)
2694  {
2695  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
2696  if(m_Count != 0)
2697  {
2698  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2699  }
2700  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2701  m_Capacity = newCapacity;
2702  m_pArray = newArray;
2703  }
2704  }
2705 
2706  void resize(size_t newCount, bool freeMemory = false)
2707  {
2708  size_t newCapacity = m_Capacity;
2709  if(newCount > m_Capacity)
2710  {
2711  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2712  }
2713  else if(freeMemory)
2714  {
2715  newCapacity = newCount;
2716  }
2717 
2718  if(newCapacity != m_Capacity)
2719  {
2720  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2721  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2722  if(elementsToCopy != 0)
2723  {
2724  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2725  }
2726  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2727  m_Capacity = newCapacity;
2728  m_pArray = newArray;
2729  }
2730 
2731  m_Count = newCount;
2732  }
2733 
2734  void clear(bool freeMemory = false)
2735  {
2736  resize(0, freeMemory);
2737  }
2738 
2739  void insert(size_t index, const T& src)
2740  {
2741  VMA_HEAVY_ASSERT(index <= m_Count);
2742  const size_t oldCount = size();
2743  resize(oldCount + 1);
2744  if(index < oldCount)
2745  {
2746  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2747  }
2748  m_pArray[index] = src;
2749  }
2750 
2751  void remove(size_t index)
2752  {
2753  VMA_HEAVY_ASSERT(index < m_Count);
2754  const size_t oldCount = size();
2755  if(index < oldCount - 1)
2756  {
2757  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2758  }
2759  resize(oldCount - 1);
2760  }
2761 
2762  void push_back(const T& src)
2763  {
2764  const size_t newIndex = size();
2765  resize(newIndex + 1);
2766  m_pArray[newIndex] = src;
2767  }
2768 
2769  void pop_back()
2770  {
2771  VMA_HEAVY_ASSERT(m_Count > 0);
2772  resize(size() - 1);
2773  }
2774 
2775  void push_front(const T& src)
2776  {
2777  insert(0, src);
2778  }
2779 
2780  void pop_front()
2781  {
2782  VMA_HEAVY_ASSERT(m_Count > 0);
2783  remove(0);
2784  }
2785 
2786  typedef T* iterator;
2787 
2788  iterator begin() { return m_pArray; }
2789  iterator end() { return m_pArray + m_Count; }
2790 
2791 private:
2792  AllocatorT m_Allocator;
2793  T* m_pArray;
2794  size_t m_Count;
2795  size_t m_Capacity;
2796 };
2797 
// Inserts item at position index of a VmaVector — shim so generic code can
// treat VmaVector and std::vector uniformly (see the STL variant above).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
2803 
// Removes the element at position index of a VmaVector — shim so generic code
// can treat VmaVector and std::vector uniformly (see the STL variant above).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
2809 
2810 #endif // #if VMA_USE_STL_VECTOR
2811 
// Inserts value into a vector kept sorted according to CmpLess, preserving
// the order. Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    // Find the lower bound for value, expressed as an index into the vector.
    const typename VectorT::value_type* const dataBeg = vector.data();
    const typename VectorT::value_type* const dataEnd = dataBeg + vector.size();
    const size_t indexToInsert =
        (size_t)(VmaBinaryFindFirstNotLess(dataBeg, dataEnd, value, CmpLess()) - dataBeg);
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
2823 
// Removes the first element equal to value (per CmpLess equivalence) from a
// sorted vector. Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Equivalence under a strict weak ordering: neither compares less.
    const bool found =
        (it != vector.end()) &&
        !comparator(*it, value) &&
        !comparator(value, *it);
    if(found)
    {
        VmaVectorRemove(vector, size_t(it - vector.begin()));
    }
    return found;
}
2841 
// Binary-searches a sorted vector for an element equivalent to value
// (per CmpLess). Returns its index, or vector.size() if not found.
//
// Fix: the original compared the result iterator against vector.size()
// (pointer vs. element count) and called non-const begin() on a const
// vector; both are wrong. Use data()-based const pointers throughout.
template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const pBeg = vector.data();
    const typename VectorT::value_type* const pEnd = pBeg + vector.size();
    const typename VectorT::value_type* const it =
        VmaBinaryFindFirstNotLess(pBeg, pEnd, value, comparator);
    if(it != pEnd && !comparator(*it, value) && !comparator(value, *it))
    {
        return size_t(it - pBeg);
    }
    return vector.size();
}
2860 
2862 // class VmaPoolAllocator
2863 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.

Each block holds m_ItemsPerBlock items; free slots within a block form an
intrusive singly-linked free list threaded through the Item union.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding T* pointers become invalid.
    void Clear();
    // Returns an uninitialized slot for a T. Never returns null (allocates a new block if needed).
    T* Alloc();
    // Returns a slot previously obtained from Alloc() back to its block's free list.
    void Free(T* ptr);

private:
    // A slot is either a live T or a link in the block's free list.
    union Item
    {
        uint32_t NextFreeIndex; // Index of next free slot in the same block, UINT32_MAX = end of list.
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;           // Array of m_ItemsPerBlock slots.
        uint32_t FirstFreeIndex; // Head of this block's free list, UINT32_MAX if block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
2898 
// Constructor: stores the Vulkan allocation callbacks (may be used for all
// internal array allocations) and the fixed per-block capacity.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // A zero-capacity block would make CreateNewBlock() index out of bounds.
    VMA_ASSERT(itemsPerBlock > 0);
}
2907 
// Destructor: releases all item blocks (any outstanding T* become invalid).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
2913 
2914 template<typename T>
2915 void VmaPoolAllocator<T>::Clear()
2916 {
2917  for(size_t i = m_ItemBlocks.size(); i--; )
2918  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2919  m_ItemBlocks.clear();
2920 }
2921 
2922 template<typename T>
2923 T* VmaPoolAllocator<T>::Alloc()
2924 {
2925  for(size_t i = m_ItemBlocks.size(); i--; )
2926  {
2927  ItemBlock& block = m_ItemBlocks[i];
2928  // This block has some free items: Use first one.
2929  if(block.FirstFreeIndex != UINT32_MAX)
2930  {
2931  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2932  block.FirstFreeIndex = pItem->NextFreeIndex;
2933  return &pItem->Value;
2934  }
2935  }
2936 
2937  // No block has free item: Create new one and use it.
2938  ItemBlock& newBlock = CreateNewBlock();
2939  Item* const pItem = &newBlock.pItems[0];
2940  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2941  return &pItem->Value;
2942 }
2943 
2944 template<typename T>
2945 void VmaPoolAllocator<T>::Free(T* ptr)
2946 {
2947  // Search all memory blocks to find ptr.
2948  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2949  {
2950  ItemBlock& block = m_ItemBlocks[i];
2951 
2952  // Casting to union.
2953  Item* pItemPtr;
2954  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2955 
2956  // Check if pItemPtr is in address range of this block.
2957  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2958  {
2959  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2960  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2961  block.FirstFreeIndex = index;
2962  return;
2963  }
2964  }
2965  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2966 }
2967 
2968 template<typename T>
2969 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2970 {
2971  ItemBlock newBlock = {
2972  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2973 
2974  m_ItemBlocks.push_back(newBlock);
2975 
2976  // Setup singly-linked list of all free items in this block.
2977  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2978  newBlock.pItems[i].NextFreeIndex = i + 1;
2979  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2980  return m_ItemBlocks.back();
2981 }
2982 
2984 // class VmaRawList, VmaList
2985 
2986 #if VMA_USE_STL_LIST
2987 
2988 #define VmaList std::list
2989 
2990 #else // #if VMA_USE_STL_LIST
2991 
// Node of VmaRawList: doubly-linked, with the payload stored inline.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the first item.
    VmaListItem* pNext; // Null for the last item.
    T Value;
};
2999 
// Doubly linked list over VmaListItem nodes, allocated from an internal
// VmaPoolAllocator. Non-copyable. Raw interface: callers manipulate
// ItemType* nodes directly; VmaList below wraps this with iterators.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items; list becomes empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() are only meaningful when the list is non-empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the node's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks and frees pItem; pItem must belong to this list.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Owns all node memory.
    ItemType* m_pFront; // Null when empty.
    ItemType* m_pBack;  // Null when empty.
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
3047 
// Constructor: empty list; node pool allocates in blocks of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3057 
// Destructor. The pool allocator's own destructor releases all node memory
// wholesale, so walking the list to free nodes one by one is skipped.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
3064 
3065 template<typename T>
3066 void VmaRawList<T>::Clear()
3067 {
3068  if(IsEmpty() == false)
3069  {
3070  ItemType* pItem = m_pBack;
3071  while(pItem != VMA_NULL)
3072  {
3073  ItemType* const pPrevItem = pItem->pPrev;
3074  m_ItemAllocator.Free(pItem);
3075  pItem = pPrevItem;
3076  }
3077  m_pFront = VMA_NULL;
3078  m_pBack = VMA_NULL;
3079  m_Count = 0;
3080  }
3081 }
3082 
3083 template<typename T>
3084 VmaListItem<T>* VmaRawList<T>::PushBack()
3085 {
3086  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3087  pNewItem->pNext = VMA_NULL;
3088  if(IsEmpty())
3089  {
3090  pNewItem->pPrev = VMA_NULL;
3091  m_pFront = pNewItem;
3092  m_pBack = pNewItem;
3093  m_Count = 1;
3094  }
3095  else
3096  {
3097  pNewItem->pPrev = m_pBack;
3098  m_pBack->pNext = pNewItem;
3099  m_pBack = pNewItem;
3100  ++m_Count;
3101  }
3102  return pNewItem;
3103 }
3104 
3105 template<typename T>
3106 VmaListItem<T>* VmaRawList<T>::PushFront()
3107 {
3108  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3109  pNewItem->pPrev = VMA_NULL;
3110  if(IsEmpty())
3111  {
3112  pNewItem->pNext = VMA_NULL;
3113  m_pFront = pNewItem;
3114  m_pBack = pNewItem;
3115  m_Count = 1;
3116  }
3117  else
3118  {
3119  pNewItem->pNext = m_pFront;
3120  m_pFront->pPrev = pNewItem;
3121  m_pFront = pNewItem;
3122  ++m_Count;
3123  }
3124  return pNewItem;
3125 }
3126 
3127 template<typename T>
3128 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3129 {
3130  ItemType* const pNewItem = PushBack();
3131  pNewItem->Value = value;
3132  return pNewItem;
3133 }
3134 
3135 template<typename T>
3136 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3137 {
3138  ItemType* const pNewItem = PushFront();
3139  pNewItem->Value = value;
3140  return pNewItem;
3141 }
3142 
3143 template<typename T>
3144 void VmaRawList<T>::PopBack()
3145 {
3146  VMA_HEAVY_ASSERT(m_Count > 0);
3147  ItemType* const pBackItem = m_pBack;
3148  ItemType* const pPrevItem = pBackItem->pPrev;
3149  if(pPrevItem != VMA_NULL)
3150  {
3151  pPrevItem->pNext = VMA_NULL;
3152  }
3153  m_pBack = pPrevItem;
3154  m_ItemAllocator.Free(pBackItem);
3155  --m_Count;
3156 }
3157 
3158 template<typename T>
3159 void VmaRawList<T>::PopFront()
3160 {
3161  VMA_HEAVY_ASSERT(m_Count > 0);
3162  ItemType* const pFrontItem = m_pFront;
3163  ItemType* const pNextItem = pFrontItem->pNext;
3164  if(pNextItem != VMA_NULL)
3165  {
3166  pNextItem->pPrev = VMA_NULL;
3167  }
3168  m_pFront = pNextItem;
3169  m_ItemAllocator.Free(pFrontItem);
3170  --m_Count;
3171 }
3172 
3173 template<typename T>
3174 void VmaRawList<T>::Remove(ItemType* pItem)
3175 {
3176  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3177  VMA_HEAVY_ASSERT(m_Count > 0);
3178 
3179  if(pItem->pPrev != VMA_NULL)
3180  {
3181  pItem->pPrev->pNext = pItem->pNext;
3182  }
3183  else
3184  {
3185  VMA_HEAVY_ASSERT(m_pFront == pItem);
3186  m_pFront = pItem->pNext;
3187  }
3188 
3189  if(pItem->pNext != VMA_NULL)
3190  {
3191  pItem->pNext->pPrev = pItem->pPrev;
3192  }
3193  else
3194  {
3195  VMA_HEAVY_ASSERT(m_pBack == pItem);
3196  m_pBack = pItem->pPrev;
3197  }
3198 
3199  m_ItemAllocator.Free(pItem);
3200  --m_Count;
3201 }
3202 
3203 template<typename T>
3204 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3205 {
3206  if(pItem != VMA_NULL)
3207  {
3208  ItemType* const prevItem = pItem->pPrev;
3209  ItemType* const newItem = m_ItemAllocator.Alloc();
3210  newItem->pPrev = prevItem;
3211  newItem->pNext = pItem;
3212  pItem->pPrev = newItem;
3213  if(prevItem != VMA_NULL)
3214  {
3215  prevItem->pNext = newItem;
3216  }
3217  else
3218  {
3219  VMA_HEAVY_ASSERT(m_pFront == pItem);
3220  m_pFront = newItem;
3221  }
3222  ++m_Count;
3223  return newItem;
3224  }
3225  else
3226  return PushBack();
3227 }
3228 
3229 template<typename T>
3230 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3231 {
3232  if(pItem != VMA_NULL)
3233  {
3234  ItemType* const nextItem = pItem->pNext;
3235  ItemType* const newItem = m_ItemAllocator.Alloc();
3236  newItem->pNext = nextItem;
3237  newItem->pPrev = pItem;
3238  pItem->pNext = newItem;
3239  if(nextItem != VMA_NULL)
3240  {
3241  nextItem->pPrev = newItem;
3242  }
3243  else
3244  {
3245  VMA_HEAVY_ASSERT(m_pBack == pItem);
3246  m_pBack = newItem;
3247  }
3248  ++m_Count;
3249  return newItem;
3250  }
3251  else
3252  return PushFront();
3253 }
3254 
3255 template<typename T>
3256 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3257 {
3258  ItemType* const newItem = InsertBefore(pItem);
3259  newItem->Value = value;
3260  return newItem;
3261 }
3262 
3263 template<typename T>
3264 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3265 {
3266  ItemType* const newItem = InsertAfter(pItem);
3267  newItem->Value = value;
3268  return newItem;
3269 }
3270 
/*
STL-like wrapper over VmaRawList: a subset of the std::list interface with
bidirectional iterators. AllocatorT must expose m_pCallbacks
(VkAllocationCallbacks*), e.g. VmaStlAllocator.
*/
template<typename T, typename AllocatorT>
class VmaList
{
public:
    // Mutable bidirectional iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is a logic error (asserted).
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList may construct a non-singular iterator.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Erasing invalidates only iterators to the erased element.
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before it; returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
3454 
3455 #endif // #if VMA_USE_STL_LIST
3456 
3458 // class VmaMap
3459 
3460 // Unused in this version.
3461 #if 0
3462 
3463 #if VMA_USE_STL_UNORDERED_MAP
3464 
3465 #define VmaPair std::pair
3466 
3467 #define VMA_MAP_TYPE(KeyT, ValueT) \
3468  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3469 
3470 #else // #if VMA_USE_STL_UNORDERED_MAP
3471 
// Minimal stand-in for std::pair, used as the element type of VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
3481 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.

Implemented as a vector of pairs kept sorted by key, so find() is a binary
search and insert()/erase() are O(n) due to element shifting.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair at its sorted position. Duplicate keys are not rejected.
    void insert(const PairType& pair);
    // Returns iterator to the pair with the given key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
3504 
3505 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3506 
// Comparator ordering VmaPair elements by their first member; the second
// overload allows comparing a pair directly against a bare key in searches.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
3519 
3520 template<typename KeyT, typename ValueT>
3521 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3522 {
3523  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3524  m_Vector.data(),
3525  m_Vector.data() + m_Vector.size(),
3526  pair,
3527  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3528  VmaVectorInsert(m_Vector, indexToInsert, pair);
3529 }
3530 
3531 template<typename KeyT, typename ValueT>
3532 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3533 {
3534  PairType* it = VmaBinaryFindFirstNotLess(
3535  m_Vector.data(),
3536  m_Vector.data() + m_Vector.size(),
3537  key,
3538  VmaPairFirstLess<KeyT, ValueT>());
3539  if((it != m_Vector.end()) && (it->first == key))
3540  {
3541  return it;
3542  }
3543  else
3544  {
3545  return m_Vector.end();
3546  }
3547 }
3548 
3549 template<typename KeyT, typename ValueT>
3550 void VmaMap<KeyT, ValueT>::erase(iterator it)
3551 {
3552  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3553 }
3554 
3555 #endif // #if VMA_USE_STL_UNORDERED_MAP
3556 
3557 #endif // #if 0
3558 
3560 
3561 class VmaDeviceMemoryBlock;
3562 
/*
Internal representation of a single allocation (the object behind the public
VmaAllocation handle). Starts in ALLOCATION_TYPE_NONE and is initialized
exactly once via InitBlockAllocation, InitLost, or InitDedicatedAllocation.
Block and dedicated state share a union, discriminated by m_Type.
*/
struct VmaAllocation_T
{
private:
    // High bit of m_MapCount: allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,     // Suballocation within a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; user map refcount must be 0.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes as a suballocation within an existing memory block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes as an already-lost block allocation (no backing memory).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; used by the lost-allocation logic.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a one-allocation block.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member selected by m_Type (BLOCK vs DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    void FreeUserDataString(VmaAllocator hAllocator);
};
3756 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Byte offset of the region within the block.
    VkDeviceSize size;         // Byte size of the region.
    VmaAllocation hAllocation; // Owning allocation; null for a free region.
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_FREE or the resource kind.
};
3768 
3769 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3770 
3771 // Cost of one additional allocation lost, as equivalent in bytes.
3772 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3773 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;

    // Heuristic cost of choosing this placement: bytes sacrificed plus a
    // fixed per-lost-allocation penalty (VMA_LOST_ALLOCATION_COST).
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
3800 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Maintains the full suballocation list plus a size-sorted index of large free
ranges (m_FreeSuballocationsBySize) for best-fit searches.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    ~VmaBlockMetadata();
    // Must be called once after construction, before any other use.
    void Init(VkDeviceSize size);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkDeviceSize GetSize() const { return m_Size; }
    // Number of live (non-free) suballocations.
    size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    VkDeviceSize GetUnusedRangeSizeMax() const;
    // Returns true if this block is empty - contains only single free suballocation.
    bool IsEmpty() const;

    void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Creates trivial request for case when block is empty.
    void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Makes actual allocation based on request. Request must already be checked and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);
    void FreeAtOffset(VkDeviceSize offset);

private:
    VkDeviceSize m_Size;
    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
3901 
3902 // Helper class that represents mapped memory. Synchronized internally.
// Helper class that represents mapped memory. Synchronized internally.
class VmaDeviceMemoryMapping
{
public:
    VmaDeviceMemoryMapping();
    ~VmaDeviceMemoryMapping();

    // Returns pointer to start of the mapped memory, or null when not mapped.
    void* GetMappedData() const { return m_pMappedData; }

    // Increases the mapping reference count by `count`.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
    // Decreases the mapping reference count by `count`.
    void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);

private:
    VMA_MUTEX m_Mutex;   // Guards m_MapCount and m_pMappedData.
    uint32_t m_MapCount; // Outstanding Map() references.
    void* m_pMappedData; // Cached mapped pointer; non-null while mapped.
};
3920 
3921 /*
3922 Represents a single block of device memory (`VkDeviceMemory`) with all the
3923 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
3924 
3925 Thread-safety: This class must be externally synchronized.
3926 */
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
public:
    uint32_t m_MemoryTypeIndex;      // Vulkan memory type this block was allocated from.
    VkDeviceMemory m_hMemory;        // Owned device memory handle; released in Destroy().
    VmaDeviceMemoryMapping m_Mapping;  // Reference-counted CPU mapping of m_hMemory.
    VmaBlockMetadata m_Metadata;       // Bookkeeping of suballocations inside this block.

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must have been called first, releasing m_hMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Maps the block's memory, increasing the mapping reference count by `count`.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);
};
3957 
// Strict weak ordering of raw pointers by address, used as comparator for
// containers sorted by pointer value (e.g. dedicated-allocation vectors).
// Note: applying the built-in `<` to pointers into different objects is
// unspecified behavior in C++; converting to uintptr_t first yields a
// well-defined total order and the same result wherever `<` was defined.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return reinterpret_cast<uintptr_t>(lhs) < reinterpret_cast<uintptr_t>(rhs);
    }
};
3965 
3966 class VmaDefragmentator;
3967 
3968 /*
3969 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3970 Vulkan memory type.
3971 
3972 Synchronized internally with a mutex.
3973 */
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool);
    ~VmaBlockVector();

    // Preallocates m_MinBlockCount blocks.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }

    // Allocates a region from an existing block, or creates a new block if
    // needed and allowed. Fills *pAllocation on success.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);

    // Lazily creates the defragmentator for this vector; returns the existing
    // one on subsequent calls.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    VMA_MUTEX m_Mutex;  // Guards m_Blocks and related state below.
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VmaDefragmentator* m_pDefragmentator;

    // NOTE(review): returns size_t although block sizes are VkDeviceSize —
    // presumably intentional, but verify against the definition elsewhere.
    size_t CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
4062 
// Internal representation of a custom memory pool (VmaPool handle).
// Thin wrapper owning a single VmaBlockVector.
struct VmaPool_T
{
public:
    VmaBlockVector m_BlockVector;

    // Takes ownership.
    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    VmaBlockVector& GetBlockVector() { return m_BlockVector; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif
};
4080 
// Moves allocations between blocks of one VmaBlockVector to compact memory.
// Collect candidates with AddAllocation(), then run Defragment().
class VmaDefragmentator
{
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector; // The block vector being defragmented.
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;      // Running totals reported via getters below.
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, with an optional
    // caller-provided flag set to VK_TRUE if the allocation was moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during Defragment().
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when not every allocation it
        // contains was registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // (Name typo "Descecnding" kept — referenced by implementation code.)
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo pointers by the address of the block they describe;
    // the heterogeneous overload enables lookup by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, bounded by the given limits.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as movable. pChanged (optional) is set when the
    // allocation is actually relocated.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
4208 
4209 // Main allocator object.
// Main allocator object (internal representation of the VmaAllocator handle).
struct VmaAllocator_T
{
    bool m_UseMutex;                    // False when created single-threaded (no locking).
    bool m_UseKhrDedicatedAllocation;   // VK_KHR_dedicated_allocation enabled.
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null so Vulkan uses its defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, raised to the debug-configured minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level vkAllocateMemory/vkFreeMemory wrappers (heap limit tracking,
    // user callbacks are handled in the implementation).
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;

    VmaVulkanFunctions m_VulkanFunctions;

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
4362 
4364 // Memory allocation #2 after VmaAllocator_T definition
4365 
4366 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4367 {
4368  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4369 }
4370 
4371 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4372 {
4373  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4374 }
4375 
4376 template<typename T>
4377 static T* VmaAllocate(VmaAllocator hAllocator)
4378 {
4379  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4380 }
4381 
4382 template<typename T>
4383 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4384 {
4385  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4386 }
4387 
4388 template<typename T>
4389 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4390 {
4391  if(ptr != VMA_NULL)
4392  {
4393  ptr->~T();
4394  VmaFree(hAllocator, ptr);
4395  }
4396 }
4397 
4398 template<typename T>
4399 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4400 {
4401  if(ptr != VMA_NULL)
4402  {
4403  for(size_t i = count; i--; )
4404  ptr[i].~T();
4405  VmaFree(hAllocator, ptr);
4406  }
4407 }
4408 
4410 // VmaStringBuilder
4411 
4412 #if VMA_STATS_STRING_ENABLED
4413 
// Growable character buffer used to build the statistics string.
// The buffer is NOT null-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
4431 
4432 void VmaStringBuilder::Add(const char* pStr)
4433 {
4434  const size_t strLen = strlen(pStr);
4435  if(strLen > 0)
4436  {
4437  const size_t oldCount = m_Data.size();
4438  m_Data.resize(oldCount + strLen);
4439  memcpy(m_Data.data() + oldCount, pStr, strLen);
4440  }
4441 }
4442 
4443 void VmaStringBuilder::AddNumber(uint32_t num)
4444 {
4445  char buf[11];
4446  VmaUint32ToStr(buf, sizeof(buf), num);
4447  Add(buf);
4448 }
4449 
4450 void VmaStringBuilder::AddNumber(uint64_t num)
4451 {
4452  char buf[21];
4453  VmaUint64ToStr(buf, sizeof(buf), num);
4454  Add(buf);
4455 }
4456 
4457 void VmaStringBuilder::AddPointer(const void* ptr)
4458 {
4459  char buf[21];
4460  VmaPtrToStr(buf, sizeof(buf), ptr);
4461  Add(buf);
4462 }
4463 
4464 #endif // #if VMA_STATS_STRING_ENABLED
4465 
4467 // VmaJsonWriter
4468 
4469 #if VMA_STATS_STRING_ENABLED
4470 
// Streaming writer that emits pretty-printed JSON into a VmaStringBuilder.
// Objects are written as alternating key (string) / value pairs; a stack of
// StackItem records tracks nesting and per-collection value counts.
class VmaJsonWriter
{
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value in one call.
    void WriteString(const char* pStr);
    // Begin/Continue/End allow building one string value from multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;   // Items written so far; in objects, keys and values each count.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack; // Open collections, innermost last.
    bool m_InsideString; // True between BeginString and EndString.

    // Emits the separator/indent that must precede the next value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
4518 
// Indentation unit emitted once per open nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
4520 
// Binds the writer to an output string builder; the stack uses the given
// allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
4527 
VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and all open objects/arrays must be closed before destruction.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
4533 
4534 void VmaJsonWriter::BeginObject(bool singleLine)
4535 {
4536  VMA_ASSERT(!m_InsideString);
4537 
4538  BeginValue(false);
4539  m_SB.Add('{');
4540 
4541  StackItem item;
4542  item.type = COLLECTION_TYPE_OBJECT;
4543  item.valueCount = 0;
4544  item.singleLineMode = singleLine;
4545  m_Stack.push_back(item);
4546 }
4547 
4548 void VmaJsonWriter::EndObject()
4549 {
4550  VMA_ASSERT(!m_InsideString);
4551 
4552  WriteIndent(true);
4553  m_SB.Add('}');
4554 
4555  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4556  m_Stack.pop_back();
4557 }
4558 
4559 void VmaJsonWriter::BeginArray(bool singleLine)
4560 {
4561  VMA_ASSERT(!m_InsideString);
4562 
4563  BeginValue(false);
4564  m_SB.Add('[');
4565 
4566  StackItem item;
4567  item.type = COLLECTION_TYPE_ARRAY;
4568  item.valueCount = 0;
4569  item.singleLineMode = singleLine;
4570  m_Stack.push_back(item);
4571 }
4572 
4573 void VmaJsonWriter::EndArray()
4574 {
4575  VMA_ASSERT(!m_InsideString);
4576 
4577  WriteIndent(true);
4578  m_SB.Add(']');
4579 
4580  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4581  m_Stack.pop_back();
4582 }
4583 
4584 void VmaJsonWriter::WriteString(const char* pStr)
4585 {
4586  BeginString(pStr);
4587  EndString();
4588 }
4589 
4590 void VmaJsonWriter::BeginString(const char* pStr)
4591 {
4592  VMA_ASSERT(!m_InsideString);
4593 
4594  BeginValue(true);
4595  m_SB.Add('"');
4596  m_InsideString = true;
4597  if(pStr != VMA_NULL && pStr[0] != '\0')
4598  {
4599  ContinueString(pStr);
4600  }
4601 }
4602 
4603 void VmaJsonWriter::ContinueString(const char* pStr)
4604 {
4605  VMA_ASSERT(m_InsideString);
4606 
4607  const size_t strLen = strlen(pStr);
4608  for(size_t i = 0; i < strLen; ++i)
4609  {
4610  char ch = pStr[i];
4611  if(ch == '\'')
4612  {
4613  m_SB.Add("\\\\");
4614  }
4615  else if(ch == '"')
4616  {
4617  m_SB.Add("\\\"");
4618  }
4619  else if(ch >= 32)
4620  {
4621  m_SB.Add(ch);
4622  }
4623  else switch(ch)
4624  {
4625  case '\b':
4626  m_SB.Add("\\b");
4627  break;
4628  case '\f':
4629  m_SB.Add("\\f");
4630  break;
4631  case '\n':
4632  m_SB.Add("\\n");
4633  break;
4634  case '\r':
4635  m_SB.Add("\\r");
4636  break;
4637  case '\t':
4638  m_SB.Add("\\t");
4639  break;
4640  default:
4641  VMA_ASSERT(0 && "Character not currently supported.");
4642  break;
4643  }
4644  }
4645 }
4646 
// Appends a decimal number to the string value currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
4652 
// Appends a decimal number to the string value currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
4658 
// Appends a pointer's textual representation to the current string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
4664 
4665 void VmaJsonWriter::EndString(const char* pStr)
4666 {
4667  VMA_ASSERT(m_InsideString);
4668  if(pStr != VMA_NULL && pStr[0] != '\0')
4669  {
4670  ContinueString(pStr);
4671  }
4672  m_SB.Add('"');
4673  m_InsideString = false;
4674 }
4675 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
4682 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
4689 
4690 void VmaJsonWriter::WriteBool(bool b)
4691 {
4692  VMA_ASSERT(!m_InsideString);
4693  BeginValue(false);
4694  m_SB.Add(b ? "true" : "false");
4695 }
4696 
// Writes a complete "null" value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
4703 
// Emits whatever must precede the next value: nothing at top level; ": "
// between an object key and its value; ", " plus indentation between sibling
// values; indentation alone before the first value of a collection.
// Inside an object, valueCount parity distinguishes keys (even) from values
// (odd) — keys must be strings.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Even count inside an object means the next item is a key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd count inside an object: separating a key from its value.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Separator between sibling values (array elements or object keys).
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First item in the collection.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
4732 
4733 void VmaJsonWriter::WriteIndent(bool oneLess)
4734 {
4735  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4736  {
4737  m_SB.AddNewLine();
4738 
4739  size_t count = m_Stack.size();
4740  if(count > 0 && oneLess)
4741  {
4742  --count;
4743  }
4744  for(size_t i = 0; i < count; ++i)
4745  {
4746  m_SB.Add(INDENT);
4747  }
4748  }
4749 }
4750 
4751 #endif // #if VMA_STATS_STRING_ENABLED
4752 
4754 
// Sets the allocation's user data. When the allocation was created with the
// user-data-is-string mode, pUserData is treated as a null-terminated string:
// the old copy is freed and a private copy of the new string is stored.
// Otherwise the raw pointer is stored as-is.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing the currently stored pointer back in would be use-after-free
        // once we free it below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            // Deep-copy the string, including the terminating null.
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
4777 
4778 void VmaAllocation_T::ChangeBlockAllocation(
4779  VmaAllocator hAllocator,
4780  VmaDeviceMemoryBlock* block,
4781  VkDeviceSize offset)
4782 {
4783  VMA_ASSERT(block != VMA_NULL);
4784  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4785 
4786  // Move mapping reference counter from old block to new block.
4787  if(block != m_BlockAllocation.m_Block)
4788  {
4789  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4790  if(IsPersistentMap())
4791  ++mapRefCount;
4792  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4793  block->Map(hAllocator, mapRefCount, VMA_NULL);
4794  }
4795 
4796  m_BlockAllocation.m_Block = block;
4797  m_BlockAllocation.m_Offset = offset;
4798 }
4799 
4800 VkDeviceSize VmaAllocation_T::GetOffset() const
4801 {
4802  switch(m_Type)
4803  {
4804  case ALLOCATION_TYPE_BLOCK:
4805  return m_BlockAllocation.m_Offset;
4806  case ALLOCATION_TYPE_DEDICATED:
4807  return 0;
4808  default:
4809  VMA_ASSERT(0);
4810  return 0;
4811  }
4812 }
4813 
4814 VkDeviceMemory VmaAllocation_T::GetMemory() const
4815 {
4816  switch(m_Type)
4817  {
4818  case ALLOCATION_TYPE_BLOCK:
4819  return m_BlockAllocation.m_Block->m_hMemory;
4820  case ALLOCATION_TYPE_DEDICATED:
4821  return m_DedicatedAllocation.m_hMemory;
4822  default:
4823  VMA_ASSERT(0);
4824  return VK_NULL_HANDLE;
4825  }
4826 }
4827 
4828 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4829 {
4830  switch(m_Type)
4831  {
4832  case ALLOCATION_TYPE_BLOCK:
4833  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4834  case ALLOCATION_TYPE_DEDICATED:
4835  return m_DedicatedAllocation.m_MemoryTypeIndex;
4836  default:
4837  VMA_ASSERT(0);
4838  return UINT32_MAX;
4839  }
4840 }
4841 
// Returns the host pointer to this allocation's memory, or null when it is
// not currently mapped. For block allocations the pointer is the block's
// mapping advanced by this allocation's offset.
void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            // Block must be mapped if this allocation holds map references.
            void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        // Cached pointer and map count must agree.
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}
4866 
4867 bool VmaAllocation_T::CanBecomeLost() const
4868 {
4869  switch(m_Type)
4870  {
4871  case ALLOCATION_TYPE_BLOCK:
4872  return m_BlockAllocation.m_CanBecomeLost;
4873  case ALLOCATION_TYPE_DEDICATED:
4874  return false;
4875  default:
4876  VMA_ASSERT(0);
4877  return false;
4878  }
4879 }
4880 
// Pool this block allocation belongs to. Valid only for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
4886 
// Attempts to mark this allocation as lost via a lock-free compare-exchange
// on its last-use frame index. Returns true on success; false if the
// allocation is still in use (its last use is within frameInUseCount frames
// of currentFrameIndex) or was concurrently updated in a way that keeps it
// alive.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // Retry loop: CompareExchangeLastUseFrameIndex refreshes
    // localLastUseFrameIndex on failure, so each iteration re-evaluates.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by in-flight frames.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
4918 
4919 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4920 {
4921  VMA_ASSERT(IsUserDataString());
4922  if(m_pUserData != VMA_NULL)
4923  {
4924  char* const oldStr = (char*)m_pUserData;
4925  const size_t oldStrLen = strlen(oldStr);
4926  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4927  m_pUserData = VMA_NULL;
4928  }
4929 }
4930 
4931 void VmaAllocation_T::BlockAllocMap()
4932 {
4933  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4934 
4935  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4936  {
4937  ++m_MapCount;
4938  }
4939  else
4940  {
4941  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4942  }
4943 }
4944 
4945 void VmaAllocation_T::BlockAllocUnmap()
4946 {
4947  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4948 
4949  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4950  {
4951  --m_MapCount;
4952  }
4953  else
4954  {
4955  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4956  }
4957 }
4958 
// Maps a dedicated allocation. If it is already mapped, only the reference
// count is bumped and the cached pointer is returned; otherwise vkMapMemory
// maps the whole memory object. Returns VK_ERROR_MEMORY_MAP_FAILED when the
// reference count would exceed its 0x7F limit.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Counter lives below the MAP_COUNT_FLAG_PERSISTENT_MAP bit.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: map the entire dedicated memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
4995 
// Unmaps a dedicated allocation: decrements the map reference count and
// calls vkUnmapMemory when it reaches zero. Asserts in debug on unbalanced
// unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference gone: drop the cached pointer and unmap.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
5016 
5017 #if VMA_STATS_STRING_ENABLED
5018 
// Correspond to values of enum VmaSuballocationType.
// Order must match the enum; indexed directly by enum value when printing.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
5028 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range, since
// the spread is meaningless for a single sample.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
5076 
5077 #endif // #if VMA_STATS_STRING_ENABLED
5078 
5079 struct VmaSuballocationItemSizeLess
5080 {
5081  bool operator()(
5082  const VmaSuballocationList::iterator lhs,
5083  const VmaSuballocationList::iterator rhs) const
5084  {
5085  return lhs->size < rhs->size;
5086  }
5087  bool operator()(
5088  const VmaSuballocationList::iterator lhs,
5089  VkDeviceSize rhsSize) const
5090  {
5091  return lhs->size < rhsSize;
5092  }
5093 };
5094 
5096 // class VmaBlockMetadata
5097 
// Constructs empty metadata; the actual block size is set later in Init().
// Both containers use the allocator's custom allocation callbacks.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
5106 
// Containers clean up via their own destructors; nothing else to release.
VmaBlockMetadata::~VmaBlockMetadata()
{
}
5110 
5111 void VmaBlockMetadata::Init(VkDeviceSize size)
5112 {
5113  m_Size = size;
5114  m_FreeCount = 1;
5115  m_SumFreeSize = size;
5116 
5117  VmaSuballocation suballoc = {};
5118  suballoc.offset = 0;
5119  suballoc.size = size;
5120  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5121  suballoc.hAllocation = VK_NULL_HANDLE;
5122 
5123  m_Suballocations.push_back(suballoc);
5124  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5125  --suballocItem;
5126  m_FreeSuballocationsBySize.push_back(suballocItem);
5127 }
5128 
/*
Exhaustive consistency check of this block's metadata:
- suballocations are contiguous (offsets sum up correctly) and cover m_Size,
- no two adjacent free suballocations exist (they should have been merged),
- the free/used state agrees with hAllocation being null/non-null,
- m_FreeSuballocationsBySize contains exactly the registrable free ranges,
  sorted by ascending size,
- cached totals (m_FreeCount, m_SumFreeSize) match recomputed values.
Returns false on the first inconsistency found. Intended for VMA_HEAVY_ASSERT.
*/
bool VmaBlockMetadata::Validate() const
{
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // Free ranges must have a null allocation handle; used ranges must not.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free ranges at or above the threshold are indexed by size.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }
        }
        else
        {
            // Used range: the owning allocation must agree on offset and size.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != m_Size) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
5234 
5235 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5236 {
5237  if(!m_FreeSuballocationsBySize.empty())
5238  {
5239  return m_FreeSuballocationsBySize.back()->size;
5240  }
5241  else
5242  {
5243  return 0;
5244  }
5245 }
5246 
5247 bool VmaBlockMetadata::IsEmpty() const
5248 {
5249  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5250 }
5251 
5252 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5253 {
5254  outInfo.blockCount = 1;
5255 
5256  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5257  outInfo.allocationCount = rangeCount - m_FreeCount;
5258  outInfo.unusedRangeCount = m_FreeCount;
5259 
5260  outInfo.unusedBytes = m_SumFreeSize;
5261  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5262 
5263  outInfo.allocationSizeMin = UINT64_MAX;
5264  outInfo.allocationSizeMax = 0;
5265  outInfo.unusedRangeSizeMin = UINT64_MAX;
5266  outInfo.unusedRangeSizeMax = 0;
5267 
5268  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5269  suballocItem != m_Suballocations.cend();
5270  ++suballocItem)
5271  {
5272  const VmaSuballocation& suballoc = *suballocItem;
5273  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5274  {
5275  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5276  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5277  }
5278  else
5279  {
5280  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5281  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5282  }
5283  }
5284 }
5285 
5286 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5287 {
5288  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5289 
5290  inoutStats.size += m_Size;
5291  inoutStats.unusedSize += m_SumFreeSize;
5292  inoutStats.allocationCount += rangeCount - m_FreeCount;
5293  inoutStats.unusedRangeCount += m_FreeCount;
5294  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5295 }
5296 
5297 #if VMA_STATS_STRING_ENABLED
5298 
// Serializes this block's full suballocation map as a JSON object:
// totals first, then an array with one entry per suballocation (type, size,
// offset, and the owning allocation's user data when present).
void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(m_Size);

    json.WriteString("UnusedBytes");
    json.WriteNumber(m_SumFreeSize);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(m_FreeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        json.BeginObject(true);

        json.WriteString("Type");
        json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);

        json.WriteString("Size");
        json.WriteNumber(suballocItem->size);

        json.WriteString("Offset");
        json.WriteNumber(suballocItem->offset);

        // Used ranges may carry user data: either a string or a raw pointer.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            const void* pUserData = suballocItem->hAllocation->GetUserData();
            if(pUserData != VMA_NULL)
            {
                json.WriteString("UserData");
                if(suballocItem->hAllocation->IsUserDataString())
                {
                    json.WriteString((const char*)pUserData);
                }
                else
                {
                    // Non-string user data is printed as a hex pointer value.
                    json.BeginString();
                    json.ContinueString_Pointer(pUserData);
                    json.EndString();
                }
            }
        }

        json.EndObject();
    }
    json.EndArray();

    json.EndObject();
}
5358 
5359 #endif // #if VMA_STATS_STRING_ENABLED
5360 
/*
How many suitable free suballocations to analyze before choosing the best one.
- Set to 1 to use First-Fit algorithm - the first suitable free suballocation
  will be chosen.
- Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
  suballocations will be analyzed and the best one will be chosen.
- Any other value is also acceptable.
*/
5369 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5370 
5371 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5372 {
5373  VMA_ASSERT(IsEmpty());
5374  pAllocationRequest->offset = 0;
5375  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5376  pAllocationRequest->sumItemSize = 0;
5377  pAllocationRequest->item = m_Suballocations.begin();
5378  pAllocationRequest->itemsToMakeLostCount = 0;
5379 }
5380 
/*
Tries to find a place for an allocation of allocSize/allocAlignment/allocType
inside this block. First searches the size-sorted free list (best-fit or
worst-fit depending on VMA_BEST_FIT). If that fails and canMakeOtherLost is
true, falls back to a brute-force scan that considers making existing
lost-able allocations lost, picking the candidate with the lowest cost.
Returns true and fills *pAllocationRequest on success.
*/
bool VmaBlockMetadata::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            // Walk upward through progressively larger candidates until one
            // also satisfies alignment and granularity constraints.
            size_t index = it - m_FreeSuballocationsBySize.data();
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // Sentinel values: VK_WHOLE_SIZE marks "no candidate found yet" and
        // makes any real candidate's cost compare as lower.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Candidate starting points: free ranges or lost-able allocations.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the candidate with the lowest cost of lost bytes.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize still at sentinel means no viable candidate was found.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
5509 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
walking forward from pAllocationRequest->item. Each successfully lost
allocation is freed (which may merge adjacent free ranges), and the request's
item iterator is updated to the resulting free suballocation. Returns false
if any of the allocations refuses to become lost (still in use this frame).
*/
bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free range to reach the next occupied suballocation.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; the returned
            // iterator is the surviving free range.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
5541 
// Makes lost every allocation in this block that can become lost and is old
// enough for the given frame window. Returns the number of allocations lost.
// After FreeSuballocation, `it` points at the merged free range, so the
// loop's ++it continues past it correctly.
uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
5559 
/*
Commits an allocation into the free suballocation described by `request`.
Shrinks the chosen free range to exactly [request.offset, request.offset +
allocSize), marking it used, and inserts new free suballocations for any
leftover padding before and/or after it. Updates m_FreeCount and
m_SumFreeSize accordingly.
*/
void VmaBlockMetadata::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before mutating size, since the index is
    // searched by the current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, plus one new free range per
    // non-empty padding region.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
5623 
5624 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5625 {
5626  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5627  suballocItem != m_Suballocations.end();
5628  ++suballocItem)
5629  {
5630  VmaSuballocation& suballoc = *suballocItem;
5631  if(suballoc.hAllocation == allocation)
5632  {
5633  FreeSuballocation(suballocItem);
5634  VMA_HEAVY_ASSERT(Validate());
5635  return;
5636  }
5637  }
5638  VMA_ASSERT(0 && "Not found!");
5639 }
5640 
5641 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5642 {
5643  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5644  suballocItem != m_Suballocations.end();
5645  ++suballocItem)
5646  {
5647  VmaSuballocation& suballoc = *suballocItem;
5648  if(suballoc.offset == offset)
5649  {
5650  FreeSuballocation(suballocItem);
5651  return;
5652  }
5653  }
5654  VMA_ASSERT(0 && "Not found!");
5655 }
5656 
5657 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5658 {
5659  VkDeviceSize lastSize = 0;
5660  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5661  {
5662  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5663 
5664  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5665  {
5666  VMA_ASSERT(0);
5667  return false;
5668  }
5669  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5670  {
5671  VMA_ASSERT(0);
5672  return false;
5673  }
5674  if(it->size < lastSize)
5675  {
5676  VMA_ASSERT(0);
5677  return false;
5678  }
5679 
5680  lastSize = it->size;
5681  }
5682  return true;
5683 }
5684 
/*
Checks whether an allocation of allocSize/allocAlignment/allocType can be
placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
bufferImageGranularity. On success fills *pOffset with the final aligned
offset and returns true.

When canMakeOtherLost is true, the candidate region may span multiple
consecutive suballocations: occupied ones are accepted only if their
allocations can be made lost (counted in *itemsToMakeLostCount, sizes summed
into *pSumItemSize), while free space encountered is summed into
*pSumFreeSize. When canMakeOtherLost is false, suballocItem must itself be a
free range large enough for the whole request.
*/
bool VmaBlockMetadata::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Occupied starting point: usable only if its allocation is
            // lost-able and old enough relative to the frame-in-use window.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(m_Size - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > m_Size)
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
5966 
5967 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5968 {
5969  VMA_ASSERT(item != m_Suballocations.end());
5970  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5971 
5972  VmaSuballocationList::iterator nextItem = item;
5973  ++nextItem;
5974  VMA_ASSERT(nextItem != m_Suballocations.end());
5975  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5976 
5977  item->size += nextItem->size;
5978  --m_FreeCount;
5979  m_Suballocations.erase(nextItem);
5980 }
5981 
/*
Marks a suballocation as free, updates totals, merges it with any adjacent
free suballocation(s), and registers the resulting free range in the
size-sorted index. Returns an iterator to the surviving free suballocation
(which may be the previous item if a backward merge happened).
*/
VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Each neighbor must be unregistered from the size index before its size
    // changes or it is erased, otherwise the index lookup would miss it.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
6033 
6034 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6035 {
6036  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6037  VMA_ASSERT(item->size > 0);
6038 
6039  // You may want to enable this validation at the beginning or at the end of
6040  // this function, depending on what do you want to check.
6041  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6042 
6043  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6044  {
6045  if(m_FreeSuballocationsBySize.empty())
6046  {
6047  m_FreeSuballocationsBySize.push_back(item);
6048  }
6049  else
6050  {
6051  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6052  }
6053  }
6054 
6055  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6056 }
6057 
6058 
/*
Removes a free suballocation from the size-sorted index. Uses binary search
to find the first entry of equal size, then scans linearly through the run of
equal-sized entries to find the exact iterator. Asserts if the item should be
registered (size above threshold) but is not present.
*/
void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    // Items below the threshold were never registered, so nothing to remove.
    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // While scanning, we must still be within the equal-size run.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
6091 
6093 // class VmaDeviceMemoryMapping
6094 
// Starts with no active map references and no cached mapped pointer.
VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
6100 
VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
{
    // Every Map() reference must have been released by Unmap() by now.
    VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
}
6105 
VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
{
    // Reference-counted mapping: vkMapMemory is called only on the first
    // reference; subsequent calls bump the counter and return the cached
    // pointer. `count` lets a caller take several references at once.
    // `ppData` may be null when the caller only needs the side effect.
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - reuse the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First reference - map the whole memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // Counter is only set on success, so a failed map leaves the
            // object unmapped and reference-free.
            m_MapCount = count;
        }
        return result;
    }
}
6144 
void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
{
    // Releases `count` map references; vkUnmapMemory is called only when the
    // counter drops to zero. Unbalanced calls hit the assert below and are
    // ignored in release builds.
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Clear the cached pointer before the driver call so no reader
            // can observe a stale pointer on a freshly unmapped block.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
6167 
6169 // class VmaDeviceMemoryBlock
6170 
// Constructs an empty block; Init() must be called before the block is used.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_MemoryTypeIndex(UINT32_MAX),
    m_hMemory(VK_NULL_HANDLE),
    m_Metadata(hAllocator)
{
}
6177 
void VmaDeviceMemoryBlock::Init(
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize)
{
    // Takes ownership of an already-allocated VkDeviceMemory. May only be
    // called once, on a freshly constructed block.
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_hMemory = newMemory;

    // Initialize suballocation metadata for the full block size.
    m_Metadata.Init(newSize);
}
6190 
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Returns the VkDeviceMemory to the allocator and resets the handle.
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;
}
6201 
6202 bool VmaDeviceMemoryBlock::Validate() const
6203 {
6204  if((m_hMemory == VK_NULL_HANDLE) ||
6205  (m_Metadata.GetSize() == 0))
6206  {
6207  return false;
6208  }
6209 
6210  return m_Metadata.Validate();
6211 }
6212 
// Takes `count` map references on this block (see VmaDeviceMemoryMapping::Map).
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
}
6217 
// Releases `count` map references on this block (see VmaDeviceMemoryMapping::Unmap).
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    m_Mapping.Unmap(hAllocator, m_hMemory, count);
}
6222 
static void InitStatInfo(VmaStatInfo& outInfo)
{
    // Zero all counters; the "min" fields start at UINT64_MAX so the first
    // VMA_MIN() comparison in VmaAddStatInfo adopts the real value.
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
6229 
6230 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6231 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6232 {
6233  inoutInfo.blockCount += srcInfo.blockCount;
6234  inoutInfo.allocationCount += srcInfo.allocationCount;
6235  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6236  inoutInfo.usedBytes += srcInfo.usedBytes;
6237  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6238  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6239  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6240  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6241  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6242 }
6243 
6244 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6245 {
6246  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6247  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6248  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6249  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6250 }
6251 
// A custom pool is a thin wrapper over a block vector configured from
// VmaPoolCreateInfo.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true) // isCustomPool
{
}
6266 
VmaPool_T::~VmaPool_T()
{
    // Nothing to do: the block vector member cleans up in its own destructor.
}
6270 
6271 #if VMA_STATS_STRING_ENABLED
6272 
6273 #endif // #if VMA_STATS_STRING_ENABLED
6274 
// Stores the configuration for a sequence of VkDeviceMemory blocks of one
// memory type; blocks themselves are created lazily (see CreateBlock).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL)
{
}
6297 
6298 VmaBlockVector::~VmaBlockVector()
6299 {
6300  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6301 
6302  for(size_t i = m_Blocks.size(); i--; )
6303  {
6304  m_Blocks[i]->Destroy(m_hAllocator);
6305  vma_delete(m_hAllocator, m_Blocks[i]);
6306  }
6307 }
6308 
6309 VkResult VmaBlockVector::CreateMinBlocks()
6310 {
6311  for(size_t i = 0; i < m_MinBlockCount; ++i)
6312  {
6313  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6314  if(res != VK_SUCCESS)
6315  {
6316  return res;
6317  }
6318  }
6319  return VK_SUCCESS;
6320 }
6321 
6322 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6323 {
6324  pStats->size = 0;
6325  pStats->unusedSize = 0;
6326  pStats->allocationCount = 0;
6327  pStats->unusedRangeCount = 0;
6328  pStats->unusedRangeSizeMax = 0;
6329 
6330  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6331 
6332  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6333  {
6334  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6335  VMA_ASSERT(pBlock);
6336  VMA_HEAVY_ASSERT(pBlock->Validate());
6337  pBlock->m_Metadata.AddPoolStats(*pStats);
6338  }
6339 }
6340 
6341 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6342 
6343 VkResult VmaBlockVector::Allocate(
6344  VmaPool hCurrentPool,
6345  uint32_t currentFrameIndex,
6346  const VkMemoryRequirements& vkMemReq,
6347  const VmaAllocationCreateInfo& createInfo,
6348  VmaSuballocationType suballocType,
6349  VmaAllocation* pAllocation)
6350 {
6351  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6352  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6353 
6354  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6355 
6356  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6357  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6358  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6359  {
6360  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6361  VMA_ASSERT(pCurrBlock);
6362  VmaAllocationRequest currRequest = {};
6363  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6364  currentFrameIndex,
6365  m_FrameInUseCount,
6366  m_BufferImageGranularity,
6367  vkMemReq.size,
6368  vkMemReq.alignment,
6369  suballocType,
6370  false, // canMakeOtherLost
6371  &currRequest))
6372  {
6373  // Allocate from pCurrBlock.
6374  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6375 
6376  if(mapped)
6377  {
6378  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6379  if(res != VK_SUCCESS)
6380  {
6381  return res;
6382  }
6383  }
6384 
6385  // We no longer have an empty Allocation.
6386  if(pCurrBlock->m_Metadata.IsEmpty())
6387  {
6388  m_HasEmptyBlock = false;
6389  }
6390 
6391  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6392  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6393  (*pAllocation)->InitBlockAllocation(
6394  hCurrentPool,
6395  pCurrBlock,
6396  currRequest.offset,
6397  vkMemReq.alignment,
6398  vkMemReq.size,
6399  suballocType,
6400  mapped,
6401  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6402  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6403  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6404  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6405  return VK_SUCCESS;
6406  }
6407  }
6408 
6409  const bool canCreateNewBlock =
6410  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6411  (m_Blocks.size() < m_MaxBlockCount);
6412 
6413  // 2. Try to create new block.
6414  if(canCreateNewBlock)
6415  {
6416  // Calculate optimal size for new block.
6417  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6418  uint32_t newBlockSizeShift = 0;
6419  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6420 
6421  // Allocating blocks of other sizes is allowed only in default pools.
6422  // In custom pools block size is fixed.
6423  if(m_IsCustomPool == false)
6424  {
6425  // Allocate 1/8, 1/4, 1/2 as first blocks.
6426  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6427  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6428  {
6429  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6430  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6431  {
6432  newBlockSize = smallerNewBlockSize;
6433  ++newBlockSizeShift;
6434  }
6435  else
6436  {
6437  break;
6438  }
6439  }
6440  }
6441 
6442  size_t newBlockIndex = 0;
6443  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6444  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6445  if(m_IsCustomPool == false)
6446  {
6447  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6448  {
6449  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6450  if(smallerNewBlockSize >= vkMemReq.size)
6451  {
6452  newBlockSize = smallerNewBlockSize;
6453  ++newBlockSizeShift;
6454  res = CreateBlock(newBlockSize, &newBlockIndex);
6455  }
6456  else
6457  {
6458  break;
6459  }
6460  }
6461  }
6462 
6463  if(res == VK_SUCCESS)
6464  {
6465  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6466  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6467 
6468  if(mapped)
6469  {
6470  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6471  if(res != VK_SUCCESS)
6472  {
6473  return res;
6474  }
6475  }
6476 
6477  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
6478  VmaAllocationRequest allocRequest;
6479  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6480  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6481  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6482  (*pAllocation)->InitBlockAllocation(
6483  hCurrentPool,
6484  pBlock,
6485  allocRequest.offset,
6486  vkMemReq.alignment,
6487  vkMemReq.size,
6488  suballocType,
6489  mapped,
6490  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6491  VMA_HEAVY_ASSERT(pBlock->Validate());
6492  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
6493  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6494  return VK_SUCCESS;
6495  }
6496  }
6497 
6498  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6499 
6500  // 3. Try to allocate from existing blocks with making other allocations lost.
6501  if(canMakeOtherLost)
6502  {
6503  uint32_t tryIndex = 0;
6504  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6505  {
6506  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6507  VmaAllocationRequest bestRequest = {};
6508  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6509 
6510  // 1. Search existing allocations.
6511  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6512  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6513  {
6514  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6515  VMA_ASSERT(pCurrBlock);
6516  VmaAllocationRequest currRequest = {};
6517  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6518  currentFrameIndex,
6519  m_FrameInUseCount,
6520  m_BufferImageGranularity,
6521  vkMemReq.size,
6522  vkMemReq.alignment,
6523  suballocType,
6524  canMakeOtherLost,
6525  &currRequest))
6526  {
6527  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6528  if(pBestRequestBlock == VMA_NULL ||
6529  currRequestCost < bestRequestCost)
6530  {
6531  pBestRequestBlock = pCurrBlock;
6532  bestRequest = currRequest;
6533  bestRequestCost = currRequestCost;
6534 
6535  if(bestRequestCost == 0)
6536  {
6537  break;
6538  }
6539  }
6540  }
6541  }
6542 
6543  if(pBestRequestBlock != VMA_NULL)
6544  {
6545  if(mapped)
6546  {
6547  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6548  if(res != VK_SUCCESS)
6549  {
6550  return res;
6551  }
6552  }
6553 
6554  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6555  currentFrameIndex,
6556  m_FrameInUseCount,
6557  &bestRequest))
6558  {
6559  // We no longer have an empty Allocation.
6560  if(pBestRequestBlock->m_Metadata.IsEmpty())
6561  {
6562  m_HasEmptyBlock = false;
6563  }
6564  // Allocate from this pBlock.
6565  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6566  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6567  (*pAllocation)->InitBlockAllocation(
6568  hCurrentPool,
6569  pBestRequestBlock,
6570  bestRequest.offset,
6571  vkMemReq.alignment,
6572  vkMemReq.size,
6573  suballocType,
6574  mapped,
6575  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6576  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6577  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6578  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6579  return VK_SUCCESS;
6580  }
6581  // else: Some allocations must have been touched while we are here. Next try.
6582  }
6583  else
6584  {
6585  // Could not find place in any of the blocks - break outer loop.
6586  break;
6587  }
6588  }
6589  /* Maximum number of tries exceeded - a very unlike event when many other
6590  threads are simultaneously touching allocations making it impossible to make
6591  lost at the same time as we try to allocate. */
6592  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6593  {
6594  return VK_ERROR_TOO_MANY_OBJECTS;
6595  }
6596  }
6597 
6598  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6599 }
6600 
6601 void VmaBlockVector::Free(
6602  VmaAllocation hAllocation)
6603 {
6604  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6605 
6606  // Scope for lock.
6607  {
6608  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6609 
6610  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6611 
6612  if(hAllocation->IsPersistentMap())
6613  {
6614  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6615  }
6616 
6617  pBlock->m_Metadata.Free(hAllocation);
6618  VMA_HEAVY_ASSERT(pBlock->Validate());
6619 
6620  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
6621 
6622  // pBlock became empty after this deallocation.
6623  if(pBlock->m_Metadata.IsEmpty())
6624  {
6625  // Already has empty Allocation. We don't want to have two, so delete this one.
6626  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6627  {
6628  pBlockToDelete = pBlock;
6629  Remove(pBlock);
6630  }
6631  // We now have first empty Allocation.
6632  else
6633  {
6634  m_HasEmptyBlock = true;
6635  }
6636  }
6637  // pBlock didn't become empty, but we have another empty block - find and free that one.
6638  // (This is optional, heuristics.)
6639  else if(m_HasEmptyBlock)
6640  {
6641  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6642  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6643  {
6644  pBlockToDelete = pLastBlock;
6645  m_Blocks.pop_back();
6646  m_HasEmptyBlock = false;
6647  }
6648  }
6649 
6650  IncrementallySortBlocks();
6651  }
6652 
6653  // Destruction of a free Allocation. Deferred until this point, outside of mutex
6654  // lock, for performance reason.
6655  if(pBlockToDelete != VMA_NULL)
6656  {
6657  VMA_DEBUG_LOG(" Deleted empty allocation");
6658  pBlockToDelete->Destroy(m_hAllocator);
6659  vma_delete(m_hAllocator, pBlockToDelete);
6660  }
6661 }
6662 
size_t VmaBlockVector::CalcMaxBlockSize() const
{
    // Returns the size of the largest existing block; used by Allocate() to
    // decide how large the next block should be. Iterates backwards and stops
    // early once the preferred block size is reached.
    // NOTE(review): result is size_t while block sizes are VkDeviceSize
    // (64-bit); on a 32-bit target a block larger than 4 GB would truncate -
    // confirm this is acceptable for the supported configurations.
    size_t result = 0;
    for(size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
        if(result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}
6676 
6677 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6678 {
6679  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6680  {
6681  if(m_Blocks[blockIndex] == pBlock)
6682  {
6683  VmaVectorRemove(m_Blocks, blockIndex);
6684  return;
6685  }
6686  }
6687  VMA_ASSERT(0);
6688 }
6689 
6690 void VmaBlockVector::IncrementallySortBlocks()
6691 {
6692  // Bubble sort only until first swap.
6693  for(size_t i = 1; i < m_Blocks.size(); ++i)
6694  {
6695  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6696  {
6697  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6698  return;
6699  }
6700  }
6701 }
6702 
6703 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6704 {
6705  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6706  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6707  allocInfo.allocationSize = blockSize;
6708  VkDeviceMemory mem = VK_NULL_HANDLE;
6709  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6710  if(res < 0)
6711  {
6712  return res;
6713  }
6714 
6715  // New VkDeviceMemory successfully created.
6716 
6717  // Create new Allocation for it.
6718  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6719  pBlock->Init(
6720  m_MemoryTypeIndex,
6721  mem,
6722  allocInfo.allocationSize);
6723 
6724  m_Blocks.push_back(pBlock);
6725  if(pNewBlockIndex != VMA_NULL)
6726  {
6727  *pNewBlockIndex = m_Blocks.size() - 1;
6728  }
6729 
6730  return VK_SUCCESS;
6731 }
6732 
6733 #if VMA_STATS_STRING_ENABLED
6734 
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    // Serializes this block vector as a JSON object. Custom pools dump their
    // full configuration; default pools only report the preferred block size.
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain the pool.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Per-block detailed maps, in current vector order.
    json.WriteString("Blocks");
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    }
    json.EndArray();

    json.EndObject();
}
6787 
6788 #endif // #if VMA_STATS_STRING_ENABLED
6789 
6790 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6791  VmaAllocator hAllocator,
6792  uint32_t currentFrameIndex)
6793 {
6794  if(m_pDefragmentator == VMA_NULL)
6795  {
6796  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6797  hAllocator,
6798  this,
6799  currentFrameIndex);
6800  }
6801 
6802  return m_pDefragmentator;
6803 }
6804 
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // Runs the defragmentator (if one was created via EnsureDefragmentator),
    // accumulates statistics, decrements the remaining budgets in-place, and
    // frees blocks that became empty while keeping at least m_MinBlockCount.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Budgets are in-out parameters shared across block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Backward iteration keeps remaining indices valid
    // across VmaVectorRemove.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_Metadata.IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Cannot free below the minimum - remember an empty block exists.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
6861 
void VmaBlockVector::DestroyDefragmentator()
{
    // Deletes the lazily-created defragmentator, if any; safe to call when
    // none exists.
    if(m_pDefragmentator != VMA_NULL)
    {
        vma_delete(m_hAllocator, m_pDefragmentator);
        m_pDefragmentator = VMA_NULL;
    }
}
6870 
6871 void VmaBlockVector::MakePoolAllocationsLost(
6872  uint32_t currentFrameIndex,
6873  size_t* pLostAllocationCount)
6874 {
6875  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6876  size_t lostAllocationCount = 0;
6877  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6878  {
6879  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6880  VMA_ASSERT(pBlock);
6881  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6882  }
6883  if(pLostAllocationCount != VMA_NULL)
6884  {
6885  *pLostAllocationCount = lostAllocationCount;
6886  }
6887 }
6888 
void VmaBlockVector::AddStats(VmaStats* pStats)
{
    // Accumulates per-block statistics into the global total, the per-memory-
    // type bucket, and the per-heap bucket of pStats.
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
6908 
6910 // VmaDefragmentator members definition
6911 
// Bound to one block vector for its lifetime; allocations to move are added
// later via AddAllocation().
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
}
6925 
6926 VmaDefragmentator::~VmaDefragmentator()
6927 {
6928  for(size_t i = m_Blocks.size(); i--; )
6929  {
6930  vma_delete(m_hAllocator, m_Blocks[i]);
6931  }
6932 }
6933 
void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Registers an allocation as a candidate for moving. `pChanged`, if not
    // null, is written VK_TRUE later if the allocation actually moves.
    AllocationInfo allocInfo;
    allocInfo.m_hAllocation = hAlloc;
    allocInfo.m_pChanged = pChanged;
    m_Allocations.push_back(allocInfo);
}
6941 
VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
{
    // Returns a CPU pointer to the block's memory, mapping it on demand.
    // Three cases, in order of preference:

    // It has already been mapped for defragmentation.
    if(m_pMappedDataForDefragmentation)
    {
        *ppMappedData = m_pMappedDataForDefragmentation;
        return VK_SUCCESS;
    }

    // It is originally mapped (persistently, by the application) - reuse that
    // mapping without taking an extra reference.
    if(m_pBlock->m_Mapping.GetMappedData())
    {
        *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
        return VK_SUCCESS;
    }

    // Map on first usage. The reference taken here is released in
    // BlockInfo::Unmap.
    VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    *ppMappedData = m_pMappedDataForDefragmentation;
    return res;
}
6963 
void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
{
    // Releases the map reference taken by EnsureMapping, if any. Blocks that
    // were already mapped by the application are left alone.
    // NOTE(review): m_pMappedDataForDefragmentation is not reset here, so a
    // second call would unmap again - confirm this is only called once per
    // block per defragmentation pass.
    if(m_pMappedDataForDefragmentation != VMA_NULL)
    {
        m_pBlock->Unmap(hAllocator, 1);
    }
}
6971 
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // One pass over the registered allocations: repeatedly picks the next
    // candidate (scanning blocks from last/"most source" to first, and within
    // a block from largest allocation to smallest) and tries to move it into
    // an earlier block. Returns VK_SUCCESS when nothing is left to process,
    // VK_INCOMPLETE when a budget limit was hit, or an error from mapping.
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX means "not yet positioned in this block"; the loop below
    // resolves it to the block's last allocation index.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be CPU-visible to copy the payload.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Update metadata: claim the destination range, release the source.
                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Tell the caller this allocation's address changed.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation, or the previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
7102 
7103 VkResult VmaDefragmentator::Defragment(
7104  VkDeviceSize maxBytesToMove,
7105  uint32_t maxAllocationsToMove)
7106 {
7107  if(m_Allocations.empty())
7108  {
7109  return VK_SUCCESS;
7110  }
7111 
7112  // Create block info for each block.
7113  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7114  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7115  {
7116  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7117  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7118  m_Blocks.push_back(pBlockInfo);
7119  }
7120 
7121  // Sort them by m_pBlock pointer value.
7122  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7123 
7124  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
7125  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7126  {
7127  AllocationInfo& allocInfo = m_Allocations[blockIndex];
7128  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
7129  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7130  {
7131  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7132  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7133  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7134  {
7135  (*it)->m_Allocations.push_back(allocInfo);
7136  }
7137  else
7138  {
7139  VMA_ASSERT(0);
7140  }
7141  }
7142  }
7143  m_Allocations.clear();
7144 
7145  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7146  {
7147  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7148  pBlockInfo->CalcHasNonMovableAllocations();
7149  pBlockInfo->SortAllocationsBySizeDescecnding();
7150  }
7151 
7152  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
7153  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7154 
7155  // Execute defragmentation rounds (the main part).
7156  VkResult result = VK_SUCCESS;
7157  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7158  {
7159  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7160  }
7161 
7162  // Unmap blocks that were mapped for defragmentation.
7163  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7164  {
7165  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7166  }
7167 
7168  return result;
7169 }
7170 
7171 bool VmaDefragmentator::MoveMakesSense(
7172  size_t dstBlockIndex, VkDeviceSize dstOffset,
7173  size_t srcBlockIndex, VkDeviceSize srcOffset)
7174 {
7175  if(dstBlockIndex < srcBlockIndex)
7176  {
7177  return true;
7178  }
7179  if(dstBlockIndex > srcBlockIndex)
7180  {
7181  return false;
7182  }
7183  if(dstOffset < srcOffset)
7184  {
7185  return true;
7186  }
7187  return false;
7188 }
7189 
7191 // VmaAllocator_T
7192 
// Constructs the allocator from user-provided create info:
// - copies device-memory callbacks,
// - resolves Vulkan entry points via ImportVulkanFunctions,
// - queries physical-device properties and memory properties,
// - applies optional per-heap size limits,
// - creates one default VmaBlockVector plus one dedicated-allocation list
//   per Vulkan memory type.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    // Zero members that are filled in incrementally below.
    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // VK_WHOLE_SIZE acts as the "no limit" sentinel for a heap.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    // 0 in create info means "use the library default".
    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    // Optionally clamp reported heap sizes to user-provided limits so block
    // size heuristics below see the limited sizes.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    // One default block vector and one dedicated-allocation vector per memory type.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0, // minBlockCount
            SIZE_MAX, // maxBlockCount
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false); // isCustomPool
        // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
        // because minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    }
}
7267 
7268 VmaAllocator_T::~VmaAllocator_T()
7269 {
7270  VMA_ASSERT(m_Pools.empty());
7271 
7272  for(size_t i = GetMemoryTypeCount(); i--; )
7273  {
7274  vma_delete(this, m_pDedicatedAllocations[i]);
7275  vma_delete(this, m_pBlockVectors[i]);
7276  }
7277 }
7278 
// Fills m_VulkanFunctions with pointers to the Vulkan entry points the
// allocator uses. Sources, in order:
// 1. If VMA_STATIC_VULKAN_FUNCTIONS == 1: addresses of the statically linked
//    core functions, plus vkGetDeviceProcAddr lookups for the
//    VK_KHR_dedicated_allocation pair when that extension is enabled.
// 2. Any non-null pointers the user supplied in pVulkanFunctions override
//    whatever was set in step 1.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not statically linked - fetch them from the device.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a user-supplied function pointer only when it is non-null, so user
// entries override static ones without erasing them.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    // The KHR pair is only required when the extension is actually in use.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
}
7352 
7353 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7354 {
7355  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7356  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7357  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7358  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7359 }
7360 
7361 VkResult VmaAllocator_T::AllocateMemoryOfType(
7362  const VkMemoryRequirements& vkMemReq,
7363  bool dedicatedAllocation,
7364  VkBuffer dedicatedBuffer,
7365  VkImage dedicatedImage,
7366  const VmaAllocationCreateInfo& createInfo,
7367  uint32_t memTypeIndex,
7368  VmaSuballocationType suballocType,
7369  VmaAllocation* pAllocation)
7370 {
7371  VMA_ASSERT(pAllocation != VMA_NULL);
7372  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7373 
7374  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7375 
7376  // If memory type is not HOST_VISIBLE, disable MAPPED.
7377  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7378  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7379  {
7380  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7381  }
7382 
7383  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7384  VMA_ASSERT(blockVector);
7385 
7386  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7387  bool preferDedicatedMemory =
7388  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7389  dedicatedAllocation ||
7390  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
7391  vkMemReq.size > preferredBlockSize / 2;
7392 
7393  if(preferDedicatedMemory &&
7394  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7395  finalCreateInfo.pool == VK_NULL_HANDLE)
7396  {
7398  }
7399 
7400  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7401  {
7402  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7403  {
7404  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7405  }
7406  else
7407  {
7408  return AllocateDedicatedMemory(
7409  vkMemReq.size,
7410  suballocType,
7411  memTypeIndex,
7412  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7413  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7414  finalCreateInfo.pUserData,
7415  dedicatedBuffer,
7416  dedicatedImage,
7417  pAllocation);
7418  }
7419  }
7420  else
7421  {
7422  VkResult res = blockVector->Allocate(
7423  VK_NULL_HANDLE, // hCurrentPool
7424  m_CurrentFrameIndex.load(),
7425  vkMemReq,
7426  finalCreateInfo,
7427  suballocType,
7428  pAllocation);
7429  if(res == VK_SUCCESS)
7430  {
7431  return res;
7432  }
7433 
7434  // 5. Try dedicated memory.
7435  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7436  {
7437  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7438  }
7439  else
7440  {
7441  res = AllocateDedicatedMemory(
7442  vkMemReq.size,
7443  suballocType,
7444  memTypeIndex,
7445  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7446  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7447  finalCreateInfo.pUserData,
7448  dedicatedBuffer,
7449  dedicatedImage,
7450  pAllocation);
7451  if(res == VK_SUCCESS)
7452  {
7453  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
7454  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7455  return VK_SUCCESS;
7456  }
7457  else
7458  {
7459  // Everything failed: Return error code.
7460  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7461  return res;
7462  }
7463  }
7464  }
7465 }
7466 
// Allocates a whole VkDeviceMemory object dedicated to a single allocation:
// optionally chains VkMemoryDedicatedAllocateInfoKHR for the given buffer or
// image, optionally maps the memory, then wraps the result in a new
// VmaAllocation_T and registers it in m_pDedicatedAllocations[memTypeIndex].
// Returns VK_SUCCESS and fills *pAllocation, or a Vulkan error code.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // With VK_KHR_dedicated_allocation, tell the driver which buffer/image this
    // memory is dedicated to. At most one of the two handles may be non-null.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole memory range.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the freshly allocated memory before bailing out.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);

    // Register it in m_pDedicatedAllocations (kept sorted).
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
7543 
7544 void VmaAllocator_T::GetBufferMemoryRequirements(
7545  VkBuffer hBuffer,
7546  VkMemoryRequirements& memReq,
7547  bool& requiresDedicatedAllocation,
7548  bool& prefersDedicatedAllocation) const
7549 {
7550  if(m_UseKhrDedicatedAllocation)
7551  {
7552  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7553  memReqInfo.buffer = hBuffer;
7554 
7555  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7556 
7557  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7558  memReq2.pNext = &memDedicatedReq;
7559 
7560  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7561 
7562  memReq = memReq2.memoryRequirements;
7563  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7564  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7565  }
7566  else
7567  {
7568  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7569  requiresDedicatedAllocation = false;
7570  prefersDedicatedAllocation = false;
7571  }
7572 }
7573 
7574 void VmaAllocator_T::GetImageMemoryRequirements(
7575  VkImage hImage,
7576  VkMemoryRequirements& memReq,
7577  bool& requiresDedicatedAllocation,
7578  bool& prefersDedicatedAllocation) const
7579 {
7580  if(m_UseKhrDedicatedAllocation)
7581  {
7582  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7583  memReqInfo.image = hImage;
7584 
7585  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7586 
7587  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7588  memReq2.pNext = &memDedicatedReq;
7589 
7590  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7591 
7592  memReq = memReq2.memoryRequirements;
7593  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7594  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7595  }
7596  else
7597  {
7598  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7599  requiresDedicatedAllocation = false;
7600  prefersDedicatedAllocation = false;
7601  }
7602 }
7603 
7604 VkResult VmaAllocator_T::AllocateMemory(
7605  const VkMemoryRequirements& vkMemReq,
7606  bool requiresDedicatedAllocation,
7607  bool prefersDedicatedAllocation,
7608  VkBuffer dedicatedBuffer,
7609  VkImage dedicatedImage,
7610  const VmaAllocationCreateInfo& createInfo,
7611  VmaSuballocationType suballocType,
7612  VmaAllocation* pAllocation)
7613 {
7614  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7615  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7616  {
7617  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7618  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7619  }
7620  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7622  {
7623  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7624  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7625  }
7626  if(requiresDedicatedAllocation)
7627  {
7628  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7629  {
7630  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7631  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7632  }
7633  if(createInfo.pool != VK_NULL_HANDLE)
7634  {
7635  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7636  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7637  }
7638  }
7639  if((createInfo.pool != VK_NULL_HANDLE) &&
7640  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7641  {
7642  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7643  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7644  }
7645 
7646  if(createInfo.pool != VK_NULL_HANDLE)
7647  {
7648  return createInfo.pool->m_BlockVector.Allocate(
7649  createInfo.pool,
7650  m_CurrentFrameIndex.load(),
7651  vkMemReq,
7652  createInfo,
7653  suballocType,
7654  pAllocation);
7655  }
7656  else
7657  {
7658  // Bit mask of memory Vulkan types acceptable for this allocation.
7659  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7660  uint32_t memTypeIndex = UINT32_MAX;
7661  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7662  if(res == VK_SUCCESS)
7663  {
7664  res = AllocateMemoryOfType(
7665  vkMemReq,
7666  requiresDedicatedAllocation || prefersDedicatedAllocation,
7667  dedicatedBuffer,
7668  dedicatedImage,
7669  createInfo,
7670  memTypeIndex,
7671  suballocType,
7672  pAllocation);
7673  // Succeeded on first try.
7674  if(res == VK_SUCCESS)
7675  {
7676  return res;
7677  }
7678  // Allocation from this memory type failed. Try other compatible memory types.
7679  else
7680  {
7681  for(;;)
7682  {
7683  // Remove old memTypeIndex from list of possibilities.
7684  memoryTypeBits &= ~(1u << memTypeIndex);
7685  // Find alternative memTypeIndex.
7686  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7687  if(res == VK_SUCCESS)
7688  {
7689  res = AllocateMemoryOfType(
7690  vkMemReq,
7691  requiresDedicatedAllocation || prefersDedicatedAllocation,
7692  dedicatedBuffer,
7693  dedicatedImage,
7694  createInfo,
7695  memTypeIndex,
7696  suballocType,
7697  pAllocation);
7698  // Allocation from this alternative memory type succeeded.
7699  if(res == VK_SUCCESS)
7700  {
7701  return res;
7702  }
7703  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7704  }
7705  // No other matching memory type index could be found.
7706  else
7707  {
7708  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7709  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7710  }
7711  }
7712  }
7713  }
7714  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7715  else
7716  return res;
7717  }
7718 }
7719 
7720 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7721 {
7722  VMA_ASSERT(allocation);
7723 
7724  if(allocation->CanBecomeLost() == false ||
7725  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7726  {
7727  switch(allocation->GetType())
7728  {
7729  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7730  {
7731  VmaBlockVector* pBlockVector = VMA_NULL;
7732  VmaPool hPool = allocation->GetPool();
7733  if(hPool != VK_NULL_HANDLE)
7734  {
7735  pBlockVector = &hPool->m_BlockVector;
7736  }
7737  else
7738  {
7739  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7740  pBlockVector = m_pBlockVectors[memTypeIndex];
7741  }
7742  pBlockVector->Free(allocation);
7743  }
7744  break;
7745  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7746  FreeDedicatedMemory(allocation);
7747  break;
7748  default:
7749  VMA_ASSERT(0);
7750  }
7751  }
7752 
7753  allocation->SetUserData(this, VMA_NULL);
7754  vma_delete(this, allocation);
7755 }
7756 
7757 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7758 {
7759  // Initialize.
7760  InitStatInfo(pStats->total);
7761  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7762  InitStatInfo(pStats->memoryType[i]);
7763  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7764  InitStatInfo(pStats->memoryHeap[i]);
7765 
7766  // Process default pools.
7767  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7768  {
7769  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7770  VMA_ASSERT(pBlockVector);
7771  pBlockVector->AddStats(pStats);
7772  }
7773 
7774  // Process custom pools.
7775  {
7776  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7777  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7778  {
7779  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7780  }
7781  }
7782 
7783  // Process dedicated allocations.
7784  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7785  {
7786  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7787  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7788  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7789  VMA_ASSERT(pDedicatedAllocVector);
7790  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7791  {
7792  VmaStatInfo allocationStatInfo;
7793  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7794  VmaAddStatInfo(pStats->total, allocationStatInfo);
7795  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7796  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7797  }
7798  }
7799 
7800  // Postprocess.
7801  VmaPostprocessCalcStatInfo(pStats->total);
7802  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7803  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7804  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7805  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7806 }
7807 
// PCI vendor ID of Advanced Micro Devices (4098 == 0x1002).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7809 
7810 VkResult VmaAllocator_T::Defragment(
7811  VmaAllocation* pAllocations,
7812  size_t allocationCount,
7813  VkBool32* pAllocationsChanged,
7814  const VmaDefragmentationInfo* pDefragmentationInfo,
7815  VmaDefragmentationStats* pDefragmentationStats)
7816 {
7817  if(pAllocationsChanged != VMA_NULL)
7818  {
7819  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
7820  }
7821  if(pDefragmentationStats != VMA_NULL)
7822  {
7823  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7824  }
7825 
7826  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7827 
7828  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7829 
7830  const size_t poolCount = m_Pools.size();
7831 
7832  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7833  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7834  {
7835  VmaAllocation hAlloc = pAllocations[allocIndex];
7836  VMA_ASSERT(hAlloc);
7837  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7838  // DedicatedAlloc cannot be defragmented.
7839  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7840  // Only HOST_VISIBLE memory types can be defragmented.
7841  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7842  // Lost allocation cannot be defragmented.
7843  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7844  {
7845  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7846 
7847  const VmaPool hAllocPool = hAlloc->GetPool();
7848  // This allocation belongs to custom pool.
7849  if(hAllocPool != VK_NULL_HANDLE)
7850  {
7851  pAllocBlockVector = &hAllocPool->GetBlockVector();
7852  }
7853  // This allocation belongs to general pool.
7854  else
7855  {
7856  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7857  }
7858 
7859  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7860 
7861  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7862  &pAllocationsChanged[allocIndex] : VMA_NULL;
7863  pDefragmentator->AddAllocation(hAlloc, pChanged);
7864  }
7865  }
7866 
7867  VkResult result = VK_SUCCESS;
7868 
7869  // ======== Main processing.
7870 
7871  VkDeviceSize maxBytesToMove = SIZE_MAX;
7872  uint32_t maxAllocationsToMove = UINT32_MAX;
7873  if(pDefragmentationInfo != VMA_NULL)
7874  {
7875  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7876  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7877  }
7878 
7879  // Process standard memory.
7880  for(uint32_t memTypeIndex = 0;
7881  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7882  ++memTypeIndex)
7883  {
7884  // Only HOST_VISIBLE memory types can be defragmented.
7885  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7886  {
7887  result = m_pBlockVectors[memTypeIndex]->Defragment(
7888  pDefragmentationStats,
7889  maxBytesToMove,
7890  maxAllocationsToMove);
7891  }
7892  }
7893 
7894  // Process custom pools.
7895  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7896  {
7897  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7898  pDefragmentationStats,
7899  maxBytesToMove,
7900  maxAllocationsToMove);
7901  }
7902 
7903  // ======== Destroy defragmentators.
7904 
7905  // Process custom pools.
7906  for(size_t poolIndex = poolCount; poolIndex--; )
7907  {
7908  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7909  }
7910 
7911  // Process standard memory.
7912  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7913  {
7914  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7915  {
7916  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7917  }
7918  }
7919 
7920  return result;
7921 }
7922 
// Fills *pAllocationInfo with current information about hAllocation.
// For allocations that can become lost this also "touches" the allocation:
// it atomically advances its last-use frame index to the current frame via a
// compare-exchange loop before reporting its state.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Loop until the allocation is observed either lost, or already
        // stamped with the current frame index.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report no backing memory, but keep size
                // and user data so the caller can still identify it.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in this frame: report its real location.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to stamp the allocation with the current frame index;
                // on failure localLastUseFrameIndex is refreshed and we retry.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Non-lost-capable allocation: report its state directly.
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
7974 
// Returns true if the allocation is still valid (not lost) and marks it as
// used in the current frame. Returns false if the allocation has been lost.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost - nothing to touch.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame - valid.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Advance last-use index with CAS; retry on contention.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Non-lost-capable allocations are always valid.
        return true;
    }
}
8006 
8007 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8008 {
8009  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8010 
8011  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8012 
8013  if(newCreateInfo.maxBlockCount == 0)
8014  {
8015  newCreateInfo.maxBlockCount = SIZE_MAX;
8016  }
8017  if(newCreateInfo.blockSize == 0)
8018  {
8019  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8020  }
8021 
8022  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8023 
8024  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8025  if(res != VK_SUCCESS)
8026  {
8027  vma_delete(this, *pPool);
8028  *pPool = VMA_NULL;
8029  return res;
8030  }
8031 
8032  // Add to m_Pools.
8033  {
8034  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8035  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8036  }
8037 
8038  return VK_SUCCESS;
8039 }
8040 
8041 void VmaAllocator_T::DestroyPool(VmaPool pool)
8042 {
8043  // Remove from m_Pools.
8044  {
8045  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8046  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8047  VMA_ASSERT(success && "Pool not found in Allocator.");
8048  }
8049 
8050  vma_delete(this, pool);
8051 }
8052 
// Thin forwarder: pool statistics are computed by the pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
8057 
// Stores the new frame index atomically; it is read concurrently by the
// lost-allocation logic (see GetAllocationInfo / TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
8062 
// Forwards to the pool's block vector, supplying the current frame index so
// allocations not used this frame can be marked lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
8071 
// Creates a dummy allocation that is already in the "lost" state
// (frame index VMA_FRAME_INDEX_LOST; second ctor argument presumably
// userDataString=false - TODO confirm against VmaAllocation_T ctor).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
8077 
8078 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8079 {
8080  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8081 
8082  VkResult res;
8083  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8084  {
8085  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8086  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8087  {
8088  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8089  if(res == VK_SUCCESS)
8090  {
8091  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8092  }
8093  }
8094  else
8095  {
8096  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8097  }
8098  }
8099  else
8100  {
8101  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8102  }
8103 
8104  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8105  {
8106  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8107  }
8108 
8109  return res;
8110 }
8111 
8112 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8113 {
8114  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8115  {
8116  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8117  }
8118 
8119  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8120 
8121  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8122  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8123  {
8124  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8125  m_HeapSizeLimit[heapIndex] += size;
8126  }
8127 }
8128 
8129 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8130 {
8131  if(hAllocation->CanBecomeLost())
8132  {
8133  return VK_ERROR_MEMORY_MAP_FAILED;
8134  }
8135 
8136  switch(hAllocation->GetType())
8137  {
8138  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8139  {
8140  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8141  char *pBytes = VMA_NULL;
8142  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8143  if(res == VK_SUCCESS)
8144  {
8145  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8146  hAllocation->BlockAllocMap();
8147  }
8148  return res;
8149  }
8150  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8151  return hAllocation->DedicatedAllocMap(this, ppData);
8152  default:
8153  VMA_ASSERT(0);
8154  return VK_ERROR_MEMORY_MAP_FAILED;
8155  }
8156 }
8157 
8158 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8159 {
8160  switch(hAllocation->GetType())
8161  {
8162  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8163  {
8164  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8165  hAllocation->BlockAllocUnmap();
8166  pBlock->Unmap(this, 1);
8167  }
8168  break;
8169  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8170  hAllocation->DedicatedAllocUnmap(this);
8171  break;
8172  default:
8173  VMA_ASSERT(0);
8174  }
8175 }
8176 
8177 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8178 {
8179  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8180 
8181  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8182  {
8183  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8184  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8185  VMA_ASSERT(pDedicatedAllocations);
8186  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8187  VMA_ASSERT(success);
8188  }
8189 
8190  VkDeviceMemory hMemory = allocation->GetMemory();
8191 
8192  if(allocation->GetMappedData() != VMA_NULL)
8193  {
8194  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8195  }
8196 
8197  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8198 
8199  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8200 }
8201 
8202 #if VMA_STATS_STRING_ENABLED
8203 
// Writes a detailed JSON map of the allocator's state into `json`, in three
// sections: "DedicatedAllocations" (per memory type), "DefaultPools"
// (per-memory-type block vectors), and "Pools" (custom pools).
// Section headers are emitted lazily so empty sections are omitted entirely.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Emit the section header only once, on first non-empty type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key: "Type <index>", value: array of allocations.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                json.BeginObject(true);

                json.WriteString("Type");
                json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);

                json.WriteString("Size");
                json.WriteNumber(hAlloc->GetSize());

                const void* pUserData = hAlloc->GetUserData();
                if(pUserData != VMA_NULL)
                {
                    json.WriteString("UserData");
                    // String user data is written verbatim; otherwise the
                    // pointer value itself is formatted.
                    if(hAlloc->IsUserDataString())
                    {
                        json.WriteString((const char*)pUserData);
                    }
                    else
                    {
                        json.BeginString();
                        json.ContinueString_Pointer(pUserData);
                        json.EndString();
                    }
                }

                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Default (per-memory-type) block vectors, skipping empty ones.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    {
        // Custom pools, as a flat array, under the pools mutex.
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginArray();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndArray();
        }
    }
}
8306 
8307 #endif // #if VMA_STATS_STRING_ENABLED
8308 
8309 static VkResult AllocateMemoryForImage(
8310  VmaAllocator allocator,
8311  VkImage image,
8312  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8313  VmaSuballocationType suballocType,
8314  VmaAllocation* pAllocation)
8315 {
8316  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8317 
8318  VkMemoryRequirements vkMemReq = {};
8319  bool requiresDedicatedAllocation = false;
8320  bool prefersDedicatedAllocation = false;
8321  allocator->GetImageMemoryRequirements(image, vkMemReq,
8322  requiresDedicatedAllocation, prefersDedicatedAllocation);
8323 
8324  return allocator->AllocateMemory(
8325  vkMemReq,
8326  requiresDedicatedAllocation,
8327  prefersDedicatedAllocation,
8328  VK_NULL_HANDLE, // dedicatedBuffer
8329  image, // dedicatedImage
8330  *pAllocationCreateInfo,
8331  suballocType,
8332  pAllocation);
8333 }
8334 
8336 // Public interface
8337 
// Creates the allocator object using the caller-supplied CPU allocation
// callbacks (may be null inside pCreateInfo). Always returns VK_SUCCESS here;
// construction itself is not expected to fail at this level.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return VK_SUCCESS;
}
8347 
8348 void vmaDestroyAllocator(
8349  VmaAllocator allocator)
8350 {
8351  if(allocator != VK_NULL_HANDLE)
8352  {
8353  VMA_DEBUG_LOG("vmaDestroyAllocator");
8354  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8355  vma_delete(&allocationCallbacks, allocator);
8356  }
8357 }
8358 
8360  VmaAllocator allocator,
8361  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8362 {
8363  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8364  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8365 }
8366 
8368  VmaAllocator allocator,
8369  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8370 {
8371  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8372  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8373 }
8374 
8376  VmaAllocator allocator,
8377  uint32_t memoryTypeIndex,
8378  VkMemoryPropertyFlags* pFlags)
8379 {
8380  VMA_ASSERT(allocator && pFlags);
8381  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8382  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8383 }
8384 
8386  VmaAllocator allocator,
8387  uint32_t frameIndex)
8388 {
8389  VMA_ASSERT(allocator);
8390  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8391 
8392  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8393 
8394  allocator->SetCurrentFrameIndex(frameIndex);
8395 }
8396 
// Computes current statistics of the whole allocator into *pStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
8405 
8406 #if VMA_STATS_STRING_ENABLED
8407 
// Builds a JSON statistics string describing the allocator state.
// The returned string is heap-allocated via the allocator's CPU callbacks and
// must be released with vmaFreeStatsString. With detailedMap == VK_TRUE the
// output also includes the per-allocation detailed map.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer finishes before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap-level stats only when something is allocated from it.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Memory types belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built string into a NUL-terminated buffer owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
8515 
8516 void vmaFreeStatsString(
8517  VmaAllocator allocator,
8518  char* pStatsString)
8519 {
8520  if(pStatsString != VMA_NULL)
8521  {
8522  VMA_ASSERT(allocator);
8523  size_t len = strlen(pStatsString);
8524  vma_delete_array(allocator, pStatsString, len + 1);
8525  }
8526 }
8527 
8528 #endif // #if VMA_STATS_STRING_ENABLED
8529 
8530 /*
8531 This function is not protected by any mutex because it just reads immutable data.
8532 */
8533 VkResult vmaFindMemoryTypeIndex(
8534  VmaAllocator allocator,
8535  uint32_t memoryTypeBits,
8536  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8537  uint32_t* pMemoryTypeIndex)
8538 {
8539  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8540  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8541  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8542 
8543  if(pAllocationCreateInfo->memoryTypeBits != 0)
8544  {
8545  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8546  }
8547 
8548  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8549  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8550 
8551  // Convert usage to requiredFlags and preferredFlags.
8552  switch(pAllocationCreateInfo->usage)
8553  {
8555  break;
8557  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8558  break;
8560  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8561  break;
8563  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8564  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8565  break;
8567  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8568  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8569  break;
8570  default:
8571  break;
8572  }
8573 
8574  *pMemoryTypeIndex = UINT32_MAX;
8575  uint32_t minCost = UINT32_MAX;
8576  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8577  memTypeIndex < allocator->GetMemoryTypeCount();
8578  ++memTypeIndex, memTypeBit <<= 1)
8579  {
8580  // This memory type is acceptable according to memoryTypeBits bitmask.
8581  if((memTypeBit & memoryTypeBits) != 0)
8582  {
8583  const VkMemoryPropertyFlags currFlags =
8584  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8585  // This memory type contains requiredFlags.
8586  if((requiredFlags & ~currFlags) == 0)
8587  {
8588  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8589  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8590  // Remember memory type with lowest cost.
8591  if(currCost < minCost)
8592  {
8593  *pMemoryTypeIndex = memTypeIndex;
8594  if(currCost == 0)
8595  {
8596  return VK_SUCCESS;
8597  }
8598  minCost = currCost;
8599  }
8600  }
8601  }
8602  }
8603  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8604 }
8605 
8607  VmaAllocator allocator,
8608  const VkBufferCreateInfo* pBufferCreateInfo,
8609  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8610  uint32_t* pMemoryTypeIndex)
8611 {
8612  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8613  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8614  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8615  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8616 
8617  const VkDevice hDev = allocator->m_hDevice;
8618  VkBuffer hBuffer = VK_NULL_HANDLE;
8619  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8620  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8621  if(res == VK_SUCCESS)
8622  {
8623  VkMemoryRequirements memReq = {};
8624  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8625  hDev, hBuffer, &memReq);
8626 
8627  res = vmaFindMemoryTypeIndex(
8628  allocator,
8629  memReq.memoryTypeBits,
8630  pAllocationCreateInfo,
8631  pMemoryTypeIndex);
8632 
8633  allocator->GetVulkanFunctions().vkDestroyBuffer(
8634  hDev, hBuffer, allocator->GetAllocationCallbacks());
8635  }
8636  return res;
8637 }
8638 
8640  VmaAllocator allocator,
8641  const VkImageCreateInfo* pImageCreateInfo,
8642  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8643  uint32_t* pMemoryTypeIndex)
8644 {
8645  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8646  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8647  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8648  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8649 
8650  const VkDevice hDev = allocator->m_hDevice;
8651  VkImage hImage = VK_NULL_HANDLE;
8652  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8653  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8654  if(res == VK_SUCCESS)
8655  {
8656  VkMemoryRequirements memReq = {};
8657  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8658  hDev, hImage, &memReq);
8659 
8660  res = vmaFindMemoryTypeIndex(
8661  allocator,
8662  memReq.memoryTypeBits,
8663  pAllocationCreateInfo,
8664  pMemoryTypeIndex);
8665 
8666  allocator->GetVulkanFunctions().vkDestroyImage(
8667  hDev, hImage, allocator->GetAllocationCallbacks());
8668  }
8669  return res;
8670 }
8671 
// Public wrapper: creates a custom memory pool (see VmaAllocator_T::CreatePool).
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CreatePool(pCreateInfo, pPool);
}
8685 
8686 void vmaDestroyPool(
8687  VmaAllocator allocator,
8688  VmaPool pool)
8689 {
8690  VMA_ASSERT(allocator);
8691 
8692  if(pool == VK_NULL_HANDLE)
8693  {
8694  return;
8695  }
8696 
8697  VMA_DEBUG_LOG("vmaDestroyPool");
8698 
8699  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8700 
8701  allocator->DestroyPool(pool);
8702 }
8703 
// Public wrapper: retrieves statistics of an existing custom pool.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
8715 
8717  VmaAllocator allocator,
8718  VmaPool pool,
8719  size_t* pLostAllocationCount)
8720 {
8721  VMA_ASSERT(allocator && pool);
8722 
8723  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8724 
8725  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8726 }
8727 
8728 VkResult vmaAllocateMemory(
8729  VmaAllocator allocator,
8730  const VkMemoryRequirements* pVkMemoryRequirements,
8731  const VmaAllocationCreateInfo* pCreateInfo,
8732  VmaAllocation* pAllocation,
8733  VmaAllocationInfo* pAllocationInfo)
8734 {
8735  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8736 
8737  VMA_DEBUG_LOG("vmaAllocateMemory");
8738 
8739  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8740 
8741  VkResult result = allocator->AllocateMemory(
8742  *pVkMemoryRequirements,
8743  false, // requiresDedicatedAllocation
8744  false, // prefersDedicatedAllocation
8745  VK_NULL_HANDLE, // dedicatedBuffer
8746  VK_NULL_HANDLE, // dedicatedImage
8747  *pCreateInfo,
8748  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8749  pAllocation);
8750 
8751  if(pAllocationInfo && result == VK_SUCCESS)
8752  {
8753  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8754  }
8755 
8756  return result;
8757 }
8758 
8760  VmaAllocator allocator,
8761  VkBuffer buffer,
8762  const VmaAllocationCreateInfo* pCreateInfo,
8763  VmaAllocation* pAllocation,
8764  VmaAllocationInfo* pAllocationInfo)
8765 {
8766  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8767 
8768  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8769 
8770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8771 
8772  VkMemoryRequirements vkMemReq = {};
8773  bool requiresDedicatedAllocation = false;
8774  bool prefersDedicatedAllocation = false;
8775  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8776  requiresDedicatedAllocation,
8777  prefersDedicatedAllocation);
8778 
8779  VkResult result = allocator->AllocateMemory(
8780  vkMemReq,
8781  requiresDedicatedAllocation,
8782  prefersDedicatedAllocation,
8783  buffer, // dedicatedBuffer
8784  VK_NULL_HANDLE, // dedicatedImage
8785  *pCreateInfo,
8786  VMA_SUBALLOCATION_TYPE_BUFFER,
8787  pAllocation);
8788 
8789  if(pAllocationInfo && result == VK_SUCCESS)
8790  {
8791  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8792  }
8793 
8794  return result;
8795 }
8796 
8797 VkResult vmaAllocateMemoryForImage(
8798  VmaAllocator allocator,
8799  VkImage image,
8800  const VmaAllocationCreateInfo* pCreateInfo,
8801  VmaAllocation* pAllocation,
8802  VmaAllocationInfo* pAllocationInfo)
8803 {
8804  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8805 
8806  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8807 
8808  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8809 
8810  VkResult result = AllocateMemoryForImage(
8811  allocator,
8812  image,
8813  pCreateInfo,
8814  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8815  pAllocation);
8816 
8817  if(pAllocationInfo && result == VK_SUCCESS)
8818  {
8819  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8820  }
8821 
8822  return result;
8823 }
8824 
// Frees memory previously allocated with one of the vmaAllocateMemory* calls.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(allocation);
}
8837 
8839  VmaAllocator allocator,
8840  VmaAllocation allocation,
8841  VmaAllocationInfo* pAllocationInfo)
8842 {
8843  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8844 
8845  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8846 
8847  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8848 }
8849 
// Returns VK_TRUE if the allocation is not lost, marking it used this frame.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->TouchAllocation(allocation);
}
8860 
8862  VmaAllocator allocator,
8863  VmaAllocation allocation,
8864  void* pUserData)
8865 {
8866  VMA_ASSERT(allocator && allocation);
8867 
8868  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8869 
8870  allocation->SetUserData(allocator, pUserData);
8871 }
8872 
8874  VmaAllocator allocator,
8875  VmaAllocation* pAllocation)
8876 {
8877  VMA_ASSERT(allocator && pAllocation);
8878 
8879  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8880 
8881  allocator->CreateLostAllocation(pAllocation);
8882 }
8883 
// Maps the allocation's memory; *ppData points at the allocation's offset.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Map(allocation, ppData);
}
8895 
// Unmaps memory previously mapped with vmaMapMemory.
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->Unmap(allocation);
}
8906 
// Compacts memory by moving the given allocations; forwards all parameters to
// the allocator's Defragment implementation.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    VMA_ASSERT(allocator && pAllocations);

    VMA_DEBUG_LOG("vmaDefragment");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
}
8923 
// Creates a buffer, allocates memory for it, and binds them together.
// On any intermediate failure, previously created objects are destroyed and
// the corresponding out-parameters are reset to VK_NULL_HANDLE.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation  = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);
        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
                allocator->m_hDevice,
                *pBuffer,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back buffer creation.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
9013 
9014 void vmaDestroyBuffer(
9015  VmaAllocator allocator,
9016  VkBuffer buffer,
9017  VmaAllocation allocation)
9018 {
9019  if(buffer != VK_NULL_HANDLE)
9020  {
9021  VMA_ASSERT(allocator);
9022 
9023  VMA_DEBUG_LOG("vmaDestroyBuffer");
9024 
9025  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9026 
9027  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
9028 
9029  allocator->FreeMemory(allocation);
9030  }
9031 }
9032 
// Creates an image, allocates memory for it, and binds them together.
// On any intermediate failure, previously created objects are destroyed and
// the corresponding out-parameters are reset to VK_NULL_HANDLE.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Suballocation type depends on image tiling: optimal-tiling images
        // must be kept apart from linear resources (buffer-image granularity).
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
                allocator->m_hDevice,
                *pImage,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back image creation.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
9093 
9094 void vmaDestroyImage(
9095  VmaAllocator allocator,
9096  VkImage image,
9097  VmaAllocation allocation)
9098 {
9099  if(image != VK_NULL_HANDLE)
9100  {
9101  VMA_ASSERT(allocator);
9102 
9103  VMA_DEBUG_LOG("vmaDestroyImage");
9104 
9105  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9106 
9107  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9108 
9109  allocator->FreeMemory(allocation);
9110  }
9111 }
9112 
9113 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1022
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1284
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1047
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
Represents single memory allocation.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1032
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1241
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1026
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1614
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1044
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1813
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1460
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1514
Definition: vk_mem_alloc.h:1321
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1015
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1359
Definition: vk_mem_alloc.h:1268
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1056
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1109
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1041
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1272
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1174
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1029
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1173
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1037
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1817
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1073
VmaStatInfo total
Definition: vk_mem_alloc.h:1183
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1825
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1343
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1808
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1030
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:957
Represents the main object of this library, created by vmaCreateAllocator().
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1050
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1468
Definition: vk_mem_alloc.h:1462
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1624
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1027
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1380
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1484
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1520
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1013
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1471
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1219
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1803
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1821
Definition: vk_mem_alloc.h:1258
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1367
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1028
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1179
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:963
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:984
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:989
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1823
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1354
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1530
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1023
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1162
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1479
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:976
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1328
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1175
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:980
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1474
Definition: vk_mem_alloc.h:1267
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1349
Definition: vk_mem_alloc.h:1340
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1165
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1025
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1492
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1059
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1523
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1338
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1373
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1097
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1181
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1308
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1174
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1034
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:978
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1033
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1506
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1638
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1053
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1174
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1171
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1511
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1619
Definition: vk_mem_alloc.h:1336
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1819
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1021
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1036
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1169
Definition: vk_mem_alloc.h:1224
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1464
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1167
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1031
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1035
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1295
Definition: vk_mem_alloc.h:1251
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1633
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1011
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1024
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1600
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1442
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1175
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1182
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1517
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1175
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1605