//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif
#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
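
/*
A sketch (not part of the original header) of how these informative callbacks
can be used, e.g. to log every VkDeviceMemory allocation. The callback is
registered through VmaDeviceMemoryCallbacks / VmaAllocatorCreateInfo below:

    void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }
*/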

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;

typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

/// Description of a VmaAllocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;

    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;

    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;

    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional, leave 0 for default.
    VkDeviceSize preferredLargeHeapBlockSize;

    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;

    /// Informative callbacks for vkAllocateMemory and vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;

    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;

    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is left defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

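/*
Example usage (a sketch; physicalDevice and device stand for valid handles
obtained elsewhere in your application):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
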
/// PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns its VkMemoryPropertyFlags.
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED
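
/*
A minimal sketch of dumping allocator statistics; the string returned by
vmaBuildStatsString() is in JSON format and must be freed with
vmaFreeStatsString():

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/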

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so fast access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on host.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on host and cached. Preferred for reading data back from GPU.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own dedicated VkDeviceMemory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing memory blocks and never create new blocks.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it.
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /// Allocation created with this flag can become lost as a result of another allocation made with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating an allocation using this flag, other allocations that were created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    /// Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a null-terminated string to be copied.
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Leave VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other ways.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Leave null to allocate from default pools.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in VmaAllocation and available via VmaAllocationInfo::pUserData.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

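/*
Example (a sketch): finding a memory type suitable for a host-visible staging
allocation. memoryTypeBits would normally come from
VkMemoryRequirements::memoryTypeBits; UINT32_MAX accepts any memory type:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &createInfo, &memTypeIndex);
*/
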
/// Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    /// Use this flag if you always allocate only buffers and linear images, or only optimal images, out of this pool.
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Zero means no limit.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

/// Allocates Vulkan device memory and creates VmaPool object.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

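/*
Example (a sketch): creating a custom pool in a specific memory type, with
128 MiB blocks. memTypeIndex stands for a value obtained from
vmaFindMemoryTypeIndex() above:

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
*/
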
/// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/// Retrieves statistics of an existing VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in given pool as lost if they are not used in the current frame
/// or VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation object that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object. Can change after a call to vmaDefragment() if this allocation is passed to the function.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

/// General purpose memory allocation.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Returns VK_TRUE if allocation is not lost and atomically marks it as used in the current frame.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Sets pUserData in given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/// Creates a new allocation that is in lost state from the beginning.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

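/*
A sketch of filling an allocation through a temporary mapping. This assumes the
allocation was made in a HOST_VISIBLE memory type; srcData and srcDataSize
stand for your own data:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
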
/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places. VK_WHOLE_SIZE means no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place. UINT32_MAX means no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

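/*
A sketch of defragmenting a set of existing allocations. Here allocations
stands for a std::vector<VmaAllocation> collected by the caller; passing null
for the optional parameters uses defaults:

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations.data(), allocations.size(),
        VMA_NULL, // pAllocationsChanged - optional
        VMA_NULL, // pDefragmentationInfo - use defaults
        &stats);
    // Moved allocations may now live in different VkDeviceMemory blocks and
    // offsets, so buffers/images bound to them must be recreated and bound
    // again, e.g. with vmaBindBufferMemory() below.
*/
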
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

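/*
Typical usage (a sketch of the pattern this function is designed for): buffer
creation and memory allocation in one call, and destruction symmetrical to it:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    vmaCreateBuffer(allocator, &bufferInfo, &allocInfo,
        &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
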
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

/// Function analogous to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys Vulkan image and frees allocated memory.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used by the Vma*ToStr helpers below
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32) && !defined(__APPLE__)
    #include <malloc.h> // for aligned_alloc()
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// Normal assert to check for programmer errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT

    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY

    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT

    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN

    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX

    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY

    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
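
// Worked example: with pageSize = 4096, a resource A at offset 0 with size 4000
// ends at byte 3999, which is on page 0. A resource B starting at offset 4000
// is still on page 0, so the function returns true; if B started at offset 4096
// it would begin on page 1 and the function would return false.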

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and the other one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

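/*
A sketch of how this helper is typically used, here on plain integers with a
strict-weak-ordering comparison (for illustration only):

    const int sorted[] = { 1, 3, 5, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 4, 5,
        [](int a, int b) { return a < b; });
    // it points to the element 5; for key 4 it would also point to 5 -
    // the position where 4 would have to be inserted.
*/
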
////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

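// A sketch of the intended use: plugging the library's VkAllocationCallbacks
// into an STL-compatible container, e.g.:
//
//     VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
//         VmaStlAllocator<uint32_t>(pAllocationCallbacks));
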
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    // The vector is const, so search over its raw data pointers.
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

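/*
A sketch of how this pool allocator is used internally. Alloc() returns raw,
uninitialized storage, so a non-trivial T would have to be constructed in
place; Node here is a hypothetical POD payload:

    struct Node { uint32_t a, b; };
    VmaPoolAllocator<Node> nodeAllocator(pAllocationCallbacks, 128);
    Node* n = nodeAllocator.Alloc();
    // ... use n ...
    nodeAllocator.Free(n);
*/
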
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

3729 
3730 class VmaDeviceMemoryBlock;
3731 
3732 struct VmaAllocation_T
3733 {
3734 private:
3735  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3736 
3737  enum FLAGS
3738  {
3739  FLAG_USER_DATA_STRING = 0x01,
3740  };
3741 
3742 public:
3743  enum ALLOCATION_TYPE
3744  {
3745  ALLOCATION_TYPE_NONE,
3746  ALLOCATION_TYPE_BLOCK,
3747  ALLOCATION_TYPE_DEDICATED,
3748  };
3749 
3750  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3751  m_Alignment(1),
3752  m_Size(0),
3753  m_pUserData(VMA_NULL),
3754  m_LastUseFrameIndex(currentFrameIndex),
3755  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3756  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3757  m_MapCount(0),
3758  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3759  {
3760  }
3761 
3762  ~VmaAllocation_T()
3763  {
3764  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3765 
3766  // Check if owned string was freed.
3767  VMA_ASSERT(m_pUserData == VMA_NULL);
3768  }
3769 
3770  void InitBlockAllocation(
3771  VmaPool hPool,
3772  VmaDeviceMemoryBlock* block,
3773  VkDeviceSize offset,
3774  VkDeviceSize alignment,
3775  VkDeviceSize size,
3776  VmaSuballocationType suballocationType,
3777  bool mapped,
3778  bool canBecomeLost)
3779  {
3780  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3781  VMA_ASSERT(block != VMA_NULL);
3782  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3783  m_Alignment = alignment;
3784  m_Size = size;
3785  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3786  m_SuballocationType = (uint8_t)suballocationType;
3787  m_BlockAllocation.m_hPool = hPool;
3788  m_BlockAllocation.m_Block = block;
3789  m_BlockAllocation.m_Offset = offset;
3790  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3791  }
3792 
3793  void InitLost()
3794  {
3795  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3796  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3797  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3798  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3799  m_BlockAllocation.m_Block = VMA_NULL;
3800  m_BlockAllocation.m_Offset = 0;
3801  m_BlockAllocation.m_CanBecomeLost = true;
3802  }
3803 
3804  void ChangeBlockAllocation(
3805  VmaAllocator hAllocator,
3806  VmaDeviceMemoryBlock* block,
3807  VkDeviceSize offset);
3808 
3809  // pMappedData not null means allocation is created with MAPPED flag.
3810  void InitDedicatedAllocation(
3811  uint32_t memoryTypeIndex,
3812  VkDeviceMemory hMemory,
3813  VmaSuballocationType suballocationType,
3814  void* pMappedData,
3815  VkDeviceSize size)
3816  {
3817  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3818  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3819  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3820  m_Alignment = 0;
3821  m_Size = size;
3822  m_SuballocationType = (uint8_t)suballocationType;
3823  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3824  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3825  m_DedicatedAllocation.m_hMemory = hMemory;
3826  m_DedicatedAllocation.m_pMappedData = pMappedData;
3827  }
3828 
3829  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3830  VkDeviceSize GetAlignment() const { return m_Alignment; }
3831  VkDeviceSize GetSize() const { return m_Size; }
3832  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3833  void* GetUserData() const { return m_pUserData; }
3834  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3835  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3836 
3837  VmaDeviceMemoryBlock* GetBlock() const
3838  {
3839  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3840  return m_BlockAllocation.m_Block;
3841  }
3842  VkDeviceSize GetOffset() const;
3843  VkDeviceMemory GetMemory() const;
3844  uint32_t GetMemoryTypeIndex() const;
3845  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3846  void* GetMappedData() const;
3847  bool CanBecomeLost() const;
3848  VmaPool GetPool() const;
3849 
3850  uint32_t GetLastUseFrameIndex() const
3851  {
3852  return m_LastUseFrameIndex.load();
3853  }
3854  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3855  {
3856  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3857  }
3858  /*
3859  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3860  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3861  - Else, returns false.
3862 
3863  If hAllocation is already lost, assert - you should not call it then.
3864  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3865  */
3866  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3867 
3868  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3869  {
3870  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3871  outInfo.blockCount = 1;
3872  outInfo.allocationCount = 1;
3873  outInfo.unusedRangeCount = 0;
3874  outInfo.usedBytes = m_Size;
3875  outInfo.unusedBytes = 0;
3876  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3877  outInfo.unusedRangeSizeMin = UINT64_MAX;
3878  outInfo.unusedRangeSizeMax = 0;
3879  }
3880 
3881  void BlockAllocMap();
3882  void BlockAllocUnmap();
3883  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3884  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3885 
3886 private:
3887  VkDeviceSize m_Alignment;
3888  VkDeviceSize m_Size;
3889  void* m_pUserData;
3890  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3891  uint8_t m_Type; // ALLOCATION_TYPE
3892  uint8_t m_SuballocationType; // VmaSuballocationType
3893  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3894  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
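 // Example layout: an allocation created with the MAPPED flag and then mapped twice more
 // via vmaMapMemory() stores m_MapCount == 0x82 (0x80 | 2).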
3895  uint8_t m_MapCount;
3896  uint8_t m_Flags; // enum FLAGS
3897 
3898  // Allocation out of VmaDeviceMemoryBlock.
3899  struct BlockAllocation
3900  {
3901  VmaPool m_hPool; // Null if it belongs to general memory.
3902  VmaDeviceMemoryBlock* m_Block;
3903  VkDeviceSize m_Offset;
3904  bool m_CanBecomeLost;
3905  };
3906 
3907  // Allocation for an object that has its own private VkDeviceMemory.
3908  struct DedicatedAllocation
3909  {
3910  uint32_t m_MemoryTypeIndex;
3911  VkDeviceMemory m_hMemory;
3912  void* m_pMappedData; // Not null means memory is mapped.
3913  };
3914 
3915  union
3916  {
3917  // Allocation out of VmaDeviceMemoryBlock.
3918  BlockAllocation m_BlockAllocation;
3919  // Allocation for an object that has its own private VkDeviceMemory.
3920  DedicatedAllocation m_DedicatedAllocation;
3921  };
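 // The active union member is selected by m_Type: m_BlockAllocation for
 // ALLOCATION_TYPE_BLOCK, m_DedicatedAllocation for ALLOCATION_TYPE_DEDICATED.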
3922 
3923  void FreeUserDataString(VmaAllocator hAllocator);
3924 };
3925 
3926 /*
3927 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3928 allocated memory block or free.
3929 */
3930 struct VmaSuballocation
3931 {
3932  VkDeviceSize offset;
3933  VkDeviceSize size;
3934  VmaAllocation hAllocation;
3935  VmaSuballocationType type;
3936 };
3937 
3938 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3939 
3940 // Cost of one additional allocation lost, expressed in bytes (1 MiB).
3941 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3942 
3943 /*
3944 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3945 
3946 If canMakeOtherLost was false:
3947 - item points to a FREE suballocation.
3948 - itemsToMakeLostCount is 0.
3949 
3950 If canMakeOtherLost was true:
3951 - item points to first of sequence of suballocations, which are either FREE,
3952  or point to VmaAllocations that can become lost.
3953 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3954  the requested allocation to succeed.
3955 */
3956 struct VmaAllocationRequest
3957 {
3958  VkDeviceSize offset;
3959  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3960  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3961  VmaSuballocationList::iterator item;
3962  size_t itemsToMakeLostCount;
3963 
3964  VkDeviceSize CalcCost() const
3965  {
3966  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3967  }
3968 };
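// Worked example for CalcCost(): a request that would make 2 allocations lost with
// sumItemSize = 3 MiB costs 3145728 + 2 * 1048576 = 5242880 bytes, so it loses to an
// alternative spot that sacrifices fewer or smaller allocations.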
3969 
3970 /*
3971 Data structure used for bookkeeping of allocations and unused ranges of memory
3972 in a single VkDeviceMemory block.
3973 */
3974 class VmaBlockMetadata
3975 {
3976 public:
3977  VmaBlockMetadata(VmaAllocator hAllocator);
3978  ~VmaBlockMetadata();
3979  void Init(VkDeviceSize size);
3980 
3981  // Validates all data structures inside this object. If not valid, returns false.
3982  bool Validate() const;
3983  VkDeviceSize GetSize() const { return m_Size; }
3984  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3985  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3986  VkDeviceSize GetUnusedRangeSizeMax() const;
3987  // Returns true if this block is empty - contains only a single free suballocation.
3988  bool IsEmpty() const;
3989 
3990  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3991  void AddPoolStats(VmaPoolStats& inoutStats) const;
3992 
3993 #if VMA_STATS_STRING_ENABLED
3994  void PrintDetailedMap(class VmaJsonWriter& json) const;
3995 #endif
3996 
3997  // Creates a trivial request for the case when the block is empty.
3998  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3999 
4000  // Tries to find a place for suballocation with given parameters inside this block.
4001  // If succeeded, fills pAllocationRequest and returns true.
4002  // If failed, returns false.
4003  bool CreateAllocationRequest(
4004  uint32_t currentFrameIndex,
4005  uint32_t frameInUseCount,
4006  VkDeviceSize bufferImageGranularity,
4007  VkDeviceSize allocSize,
4008  VkDeviceSize allocAlignment,
4009  VmaSuballocationType allocType,
4010  bool canMakeOtherLost,
4011  VmaAllocationRequest* pAllocationRequest);
4012 
4013  bool MakeRequestedAllocationsLost(
4014  uint32_t currentFrameIndex,
4015  uint32_t frameInUseCount,
4016  VmaAllocationRequest* pAllocationRequest);
4017 
4018  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4019 
4020  // Makes actual allocation based on request. Request must already be checked and valid.
4021  void Alloc(
4022  const VmaAllocationRequest& request,
4023  VmaSuballocationType type,
4024  VkDeviceSize allocSize,
4025  VmaAllocation hAllocation);
4026 
4027  // Frees suballocation assigned to given memory region.
4028  void Free(const VmaAllocation allocation);
4029  void FreeAtOffset(VkDeviceSize offset);
4030 
4031 private:
4032  VkDeviceSize m_Size;
4033  uint32_t m_FreeCount;
4034  VkDeviceSize m_SumFreeSize;
4035  VmaSuballocationList m_Suballocations;
4036  // Suballocations that are free and have size greater than a certain threshold.
4037  // Sorted by size, ascending.
4038  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4039 
4040  bool ValidateFreeSuballocationList() const;
4041 
4042  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
4043  // If yes, fills pOffset and returns true. If no, returns false.
4044  bool CheckAllocation(
4045  uint32_t currentFrameIndex,
4046  uint32_t frameInUseCount,
4047  VkDeviceSize bufferImageGranularity,
4048  VkDeviceSize allocSize,
4049  VkDeviceSize allocAlignment,
4050  VmaSuballocationType allocType,
4051  VmaSuballocationList::const_iterator suballocItem,
4052  bool canMakeOtherLost,
4053  VkDeviceSize* pOffset,
4054  size_t* itemsToMakeLostCount,
4055  VkDeviceSize* pSumFreeSize,
4056  VkDeviceSize* pSumItemSize) const;
4057  // Given a free suballocation, merges it with the following one, which must also be free.
4058  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4059  // Releases given suballocation, making it free.
4060  // Merges it with adjacent free suballocations if applicable.
4061  // Returns iterator to new free suballocation at this place.
4062  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4063  // Given a free suballocation, inserts it into the sorted list
4064  // m_FreeSuballocationsBySize if it qualifies (size above the registration threshold).
4065  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4066  // Given a free suballocation, removes it from the sorted list
4067  // m_FreeSuballocationsBySize if it is registered there.
4068  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4069 };
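/*
Illustrative call sequence for allocating from an existing block using the interface
above (a sketch only - frameIndex, granularity, size, alignment, allocType,
canMakeOtherLost, and hAllocation stand for caller-provided values):

    VmaAllocationRequest request;
    if(metadata.CreateAllocationRequest(frameIndex, frameInUseCount, granularity,
            size, alignment, allocType, canMakeOtherLost, &request) &&
        metadata.MakeRequestedAllocationsLost(frameIndex, frameInUseCount, &request))
    {
        metadata.Alloc(request, allocType, size, hAllocation);
    }
*/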
4070 
4071 /*
4072 Represents a single block of device memory (`VkDeviceMemory`) with all the
4073 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4074 
4075 Thread-safety: This class must be externally synchronized.
4076 */
4077 class VmaDeviceMemoryBlock
4078 {
4079 public:
4080  VmaBlockMetadata m_Metadata;
4081 
4082  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
4083 
4084  ~VmaDeviceMemoryBlock()
4085  {
4086  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
4087  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4088  }
4089 
4090  // Always call after construction.
4091  void Init(
4092  uint32_t newMemoryTypeIndex,
4093  VkDeviceMemory newMemory,
4094  VkDeviceSize newSize);
4095  // Always call before destruction.
4096  void Destroy(VmaAllocator allocator);
4097 
4098  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
4099  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4100  void* GetMappedData() const { return m_pMappedData; }
4101 
4102  // Validates all data structures inside this object. If not valid, returns false.
4103  bool Validate() const;
4104 
4105  // ppData can be null.
4106  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
4107  void Unmap(VmaAllocator hAllocator, uint32_t count);
4108 
4109  VkResult BindBufferMemory(
4110  const VmaAllocator hAllocator,
4111  const VmaAllocation hAllocation,
4112  VkBuffer hBuffer);
4113  VkResult BindImageMemory(
4114  const VmaAllocator hAllocator,
4115  const VmaAllocation hAllocation,
4116  VkImage hImage);
4117 
4118 private:
4119  uint32_t m_MemoryTypeIndex;
4120  VkDeviceMemory m_hMemory;
4121 
4122  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
4123  // Also protects m_MapCount, m_pMappedData.
4124  VMA_MUTEX m_Mutex;
4125  uint32_t m_MapCount;
4126  void* m_pMappedData;
4127 };
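// Note on Map()/Unmap(): the count parameter adds or releases that many mapping
// references at once under m_Mutex; the underlying vkMapMemory/vkUnmapMemory calls
// are issued only when m_MapCount crosses zero. ChangeBlockAllocation() relies on
// this to move a whole reference count from one block to another.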
4128 
4129 struct VmaPointerLess
4130 {
4131  bool operator()(const void* lhs, const void* rhs) const
4132  {
4133  return lhs < rhs;
4134  }
4135 };
4136 
4137 class VmaDefragmentator;
4138 
4139 /*
4140 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4141 Vulkan memory type.
4142 
4143 Synchronized internally with a mutex.
4144 */
4145 struct VmaBlockVector
4146 {
4147  VmaBlockVector(
4148  VmaAllocator hAllocator,
4149  uint32_t memoryTypeIndex,
4150  VkDeviceSize preferredBlockSize,
4151  size_t minBlockCount,
4152  size_t maxBlockCount,
4153  VkDeviceSize bufferImageGranularity,
4154  uint32_t frameInUseCount,
4155  bool isCustomPool);
4156  ~VmaBlockVector();
4157 
4158  VkResult CreateMinBlocks();
4159 
4160  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4161  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
4162  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
4163  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
4164 
4165  void GetPoolStats(VmaPoolStats* pStats);
4166 
4167  bool IsEmpty() const { return m_Blocks.empty(); }
4168 
4169  VkResult Allocate(
4170  VmaPool hCurrentPool,
4171  uint32_t currentFrameIndex,
4172  const VkMemoryRequirements& vkMemReq,
4173  const VmaAllocationCreateInfo& createInfo,
4174  VmaSuballocationType suballocType,
4175  VmaAllocation* pAllocation);
4176 
4177  void Free(
4178  VmaAllocation hAllocation);
4179 
4180  // Adds statistics of this BlockVector to pStats.
4181  void AddStats(VmaStats* pStats);
4182 
4183 #if VMA_STATS_STRING_ENABLED
4184  void PrintDetailedMap(class VmaJsonWriter& json);
4185 #endif
4186 
4187  void MakePoolAllocationsLost(
4188  uint32_t currentFrameIndex,
4189  size_t* pLostAllocationCount);
4190 
4191  VmaDefragmentator* EnsureDefragmentator(
4192  VmaAllocator hAllocator,
4193  uint32_t currentFrameIndex);
4194 
4195  VkResult Defragment(
4196  VmaDefragmentationStats* pDefragmentationStats,
4197  VkDeviceSize& maxBytesToMove,
4198  uint32_t& maxAllocationsToMove);
4199 
4200  void DestroyDefragmentator();
4201 
4202 private:
4203  friend class VmaDefragmentator;
4204 
4205  const VmaAllocator m_hAllocator;
4206  const uint32_t m_MemoryTypeIndex;
4207  const VkDeviceSize m_PreferredBlockSize;
4208  const size_t m_MinBlockCount;
4209  const size_t m_MaxBlockCount;
4210  const VkDeviceSize m_BufferImageGranularity;
4211  const uint32_t m_FrameInUseCount;
4212  const bool m_IsCustomPool;
4213  VMA_MUTEX m_Mutex;
4214  // Incrementally sorted by sumFreeSize, ascending.
4215  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
4216  /* There can be at most one block that is completely empty - a
4217  hysteresis to avoid the pessimistic case of alternating creation and destruction
4218  of a VkDeviceMemory. */
4219  bool m_HasEmptyBlock;
4220  VmaDefragmentator* m_pDefragmentator;
4221 
4222  size_t CalcMaxBlockSize() const;
4223 
4224  // Finds and removes given block from vector.
4225  void Remove(VmaDeviceMemoryBlock* pBlock);
4226 
4227  // Performs single step in sorting m_Blocks. They may not be fully sorted
4228  // after this call.
4229  void IncrementallySortBlocks();
4230 
4231  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4232 };
4233 
4234 struct VmaPool_T
4235 {
4236 public:
4237  VmaBlockVector m_BlockVector;
4238 
4239  // Takes ownership.
4240  VmaPool_T(
4241  VmaAllocator hAllocator,
4242  const VmaPoolCreateInfo& createInfo);
4243  ~VmaPool_T();
4244 
4245  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4246 
4247 #if VMA_STATS_STRING_ENABLED
4248  //void PrintDetailedMap(class VmaStringBuilder& sb);
4249 #endif
4250 };
4251 
4252 class VmaDefragmentator
4253 {
4254  const VmaAllocator m_hAllocator;
4255  VmaBlockVector* const m_pBlockVector;
4256  uint32_t m_CurrentFrameIndex;
4257  VkDeviceSize m_BytesMoved;
4258  uint32_t m_AllocationsMoved;
4259 
4260  struct AllocationInfo
4261  {
4262  VmaAllocation m_hAllocation;
4263  VkBool32* m_pChanged;
4264 
4265  AllocationInfo() :
4266  m_hAllocation(VK_NULL_HANDLE),
4267  m_pChanged(VMA_NULL)
4268  {
4269  }
4270  };
4271 
4272  struct AllocationInfoSizeGreater
4273  {
4274  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4275  {
4276  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4277  }
4278  };
4279 
4280  // Used between AddAllocation and Defragment.
4281  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4282 
4283  struct BlockInfo
4284  {
4285  VmaDeviceMemoryBlock* m_pBlock;
4286  bool m_HasNonMovableAllocations;
4287  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4288 
4289  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4290  m_pBlock(VMA_NULL),
4291  m_HasNonMovableAllocations(true),
4292  m_Allocations(pAllocationCallbacks),
4293  m_pMappedDataForDefragmentation(VMA_NULL)
4294  {
4295  }
4296 
4297  void CalcHasNonMovableAllocations()
4298  {
4299  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4300  const size_t defragmentAllocCount = m_Allocations.size();
4301  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4302  }
4303 
4304  void SortAllocationsBySizeDescecnding()
4305  {
4306  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4307  }
4308 
4309  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4310  void Unmap(VmaAllocator hAllocator);
4311 
4312  private:
4313  // Not null if mapped for defragmentation only, not originally mapped.
4314  void* m_pMappedDataForDefragmentation;
4315  };
4316 
4317  struct BlockPointerLess
4318  {
4319  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4320  {
4321  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4322  }
4323  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4324  {
4325  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4326  }
4327  };
4328 
4329  // 1. Blocks with some non-movable allocations go first.
4330  // 2. Blocks with smaller sumFreeSize go first.
4331  struct BlockInfoCompareMoveDestination
4332  {
4333  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4334  {
4335  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4336  {
4337  return true;
4338  }
4339  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4340  {
4341  return false;
4342  }
4343  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4344  {
4345  return true;
4346  }
4347  return false;
4348  }
4349  };
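 // For example, blocks A (has non-movable allocations, 10 MiB free), B (all movable,
 // 2 MiB free), and C (all movable, 8 MiB free) sort as A, B, C: destinations that
 // cannot be emptied anyway come first, then the fullest movable blocks.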
4350 
4351  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4352  BlockInfoVector m_Blocks;
4353 
4354  VkResult DefragmentRound(
4355  VkDeviceSize maxBytesToMove,
4356  uint32_t maxAllocationsToMove);
4357 
4358  static bool MoveMakesSense(
4359  size_t dstBlockIndex, VkDeviceSize dstOffset,
4360  size_t srcBlockIndex, VkDeviceSize srcOffset);
4361 
4362 public:
4363  VmaDefragmentator(
4364  VmaAllocator hAllocator,
4365  VmaBlockVector* pBlockVector,
4366  uint32_t currentFrameIndex);
4367 
4368  ~VmaDefragmentator();
4369 
4370  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4371  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4372 
4373  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4374 
4375  VkResult Defragment(
4376  VkDeviceSize maxBytesToMove,
4377  uint32_t maxAllocationsToMove);
4378 };
4379 
4380 // Main allocator object.
4381 struct VmaAllocator_T
4382 {
4383  bool m_UseMutex;
4384  bool m_UseKhrDedicatedAllocation;
4385  VkDevice m_hDevice;
4386  bool m_AllocationCallbacksSpecified;
4387  VkAllocationCallbacks m_AllocationCallbacks;
4388  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4389 
4390  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4391  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4392  VMA_MUTEX m_HeapSizeLimitMutex;
4393 
4394  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4395  VkPhysicalDeviceMemoryProperties m_MemProps;
4396 
4397  // Default pools.
4398  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4399 
4400  // Each vector is sorted by memory (handle value).
4401  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4402  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4403  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4404 
4405  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4406  ~VmaAllocator_T();
4407 
4408  const VkAllocationCallbacks* GetAllocationCallbacks() const
4409  {
4410  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4411  }
4412  const VmaVulkanFunctions& GetVulkanFunctions() const
4413  {
4414  return m_VulkanFunctions;
4415  }
4416 
4417  VkDeviceSize GetBufferImageGranularity() const
4418  {
4419  return VMA_MAX(
4420  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4421  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4422  }
4423 
4424  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4425  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4426 
4427  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4428  {
4429  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4430  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4431  }
4432 
4433  void GetBufferMemoryRequirements(
4434  VkBuffer hBuffer,
4435  VkMemoryRequirements& memReq,
4436  bool& requiresDedicatedAllocation,
4437  bool& prefersDedicatedAllocation) const;
4438  void GetImageMemoryRequirements(
4439  VkImage hImage,
4440  VkMemoryRequirements& memReq,
4441  bool& requiresDedicatedAllocation,
4442  bool& prefersDedicatedAllocation) const;
4443 
4444  // Main allocation function.
4445  VkResult AllocateMemory(
4446  const VkMemoryRequirements& vkMemReq,
4447  bool requiresDedicatedAllocation,
4448  bool prefersDedicatedAllocation,
4449  VkBuffer dedicatedBuffer,
4450  VkImage dedicatedImage,
4451  const VmaAllocationCreateInfo& createInfo,
4452  VmaSuballocationType suballocType,
4453  VmaAllocation* pAllocation);
4454 
4455  // Main deallocation function.
4456  void FreeMemory(const VmaAllocation allocation);
4457 
4458  void CalculateStats(VmaStats* pStats);
4459 
4460 #if VMA_STATS_STRING_ENABLED
4461  void PrintDetailedMap(class VmaJsonWriter& json);
4462 #endif
4463 
4464  VkResult Defragment(
4465  VmaAllocation* pAllocations,
4466  size_t allocationCount,
4467  VkBool32* pAllocationsChanged,
4468  const VmaDefragmentationInfo* pDefragmentationInfo,
4469  VmaDefragmentationStats* pDefragmentationStats);
4470 
4471  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4472  bool TouchAllocation(VmaAllocation hAllocation);
4473 
4474  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4475  void DestroyPool(VmaPool pool);
4476  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4477 
4478  void SetCurrentFrameIndex(uint32_t frameIndex);
4479 
4480  void MakePoolAllocationsLost(
4481  VmaPool hPool,
4482  size_t* pLostAllocationCount);
4483 
4484  void CreateLostAllocation(VmaAllocation* pAllocation);
4485 
4486  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4487  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4488 
4489  VkResult Map(VmaAllocation hAllocation, void** ppData);
4490  void Unmap(VmaAllocation hAllocation);
4491 
4492  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
4493  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
4494 
4495 private:
4496  VkDeviceSize m_PreferredLargeHeapBlockSize;
4497 
4498  VkPhysicalDevice m_PhysicalDevice;
4499  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4500 
4501  VMA_MUTEX m_PoolsMutex;
4502  // Protected by m_PoolsMutex. Sorted by pointer value.
4503  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4504 
4505  VmaVulkanFunctions m_VulkanFunctions;
4506 
4507  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4508 
4509  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4510 
4511  VkResult AllocateMemoryOfType(
4512  const VkMemoryRequirements& vkMemReq,
4513  bool dedicatedAllocation,
4514  VkBuffer dedicatedBuffer,
4515  VkImage dedicatedImage,
4516  const VmaAllocationCreateInfo& createInfo,
4517  uint32_t memTypeIndex,
4518  VmaSuballocationType suballocType,
4519  VmaAllocation* pAllocation);
4520 
4521  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4522  VkResult AllocateDedicatedMemory(
4523  VkDeviceSize size,
4524  VmaSuballocationType suballocType,
4525  uint32_t memTypeIndex,
4526  bool map,
4527  bool isUserDataString,
4528  void* pUserData,
4529  VkBuffer dedicatedBuffer,
4530  VkImage dedicatedImage,
4531  VmaAllocation* pAllocation);
4532 
4533  // Frees memory of an allocation that was created as dedicated, i.e. has its own private VkDeviceMemory.
4534  void FreeDedicatedMemory(VmaAllocation allocation);
4535 };
4536 
4537 ////////////////////////////////////////////////////////////////////////////////
4538 // Memory allocation #2 after VmaAllocator_T definition
4539 
4540 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4541 {
4542  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4543 }
4544 
4545 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4546 {
4547  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4548 }
4549 
4550 template<typename T>
4551 static T* VmaAllocate(VmaAllocator hAllocator)
4552 {
4553  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4554 }
4555 
4556 template<typename T>
4557 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4558 {
4559  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4560 }
4561 
4562 template<typename T>
4563 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4564 {
4565  if(ptr != VMA_NULL)
4566  {
4567  ptr->~T();
4568  VmaFree(hAllocator, ptr);
4569  }
4570 }
4571 
4572 template<typename T>
4573 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4574 {
4575  if(ptr != VMA_NULL)
4576  {
4577  for(size_t i = count; i--; )
4578  ptr[i].~T();
4579  VmaFree(hAllocator, ptr);
4580  }
4581 }
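// Illustrative pairing of these helpers (MyType is a placeholder type):
//   MyType* p = VmaAllocate<MyType>(hAllocator); // raw storage only, no constructor run
//   new(p) MyType();                             // construct in place
//   vma_delete(hAllocator, p);                   // runs ~MyType() and frees the storage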
4582 
4583 ////////////////////////////////////////////////////////////////////////////////
4584 // VmaStringBuilder
4585 
4586 #if VMA_STATS_STRING_ENABLED
4587 
4588 class VmaStringBuilder
4589 {
4590 public:
4591  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4592  size_t GetLength() const { return m_Data.size(); }
4593  const char* GetData() const { return m_Data.data(); }
4594 
4595  void Add(char ch) { m_Data.push_back(ch); }
4596  void Add(const char* pStr);
4597  void AddNewLine() { Add('\n'); }
4598  void AddNumber(uint32_t num);
4599  void AddNumber(uint64_t num);
4600  void AddPointer(const void* ptr);
4601 
4602 private:
4603  VmaVector< char, VmaStlAllocator<char> > m_Data;
4604 };
4605 
4606 void VmaStringBuilder::Add(const char* pStr)
4607 {
4608  const size_t strLen = strlen(pStr);
4609  if(strLen > 0)
4610  {
4611  const size_t oldCount = m_Data.size();
4612  m_Data.resize(oldCount + strLen);
4613  memcpy(m_Data.data() + oldCount, pStr, strLen);
4614  }
4615 }
4616 
4617 void VmaStringBuilder::AddNumber(uint32_t num)
4618 {
4619  char buf[11];
4620  VmaUint32ToStr(buf, sizeof(buf), num);
4621  Add(buf);
4622 }
4623 
4624 void VmaStringBuilder::AddNumber(uint64_t num)
4625 {
4626  char buf[21];
4627  VmaUint64ToStr(buf, sizeof(buf), num);
4628  Add(buf);
4629 }
4630 
4631 void VmaStringBuilder::AddPointer(const void* ptr)
4632 {
4633  char buf[21];
4634  VmaPtrToStr(buf, sizeof(buf), ptr);
4635  Add(buf);
4636 }
4637 
4638 #endif // #if VMA_STATS_STRING_ENABLED
4639 
4640 ////////////////////////////////////////////////////////////////////////////////
4641 // VmaJsonWriter
4642 
4643 #if VMA_STATS_STRING_ENABLED
4644 
4645 class VmaJsonWriter
4646 {
4647 public:
4648  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4649  ~VmaJsonWriter();
4650 
4651  void BeginObject(bool singleLine = false);
4652  void EndObject();
4653 
4654  void BeginArray(bool singleLine = false);
4655  void EndArray();
4656 
4657  void WriteString(const char* pStr);
4658  void BeginString(const char* pStr = VMA_NULL);
4659  void ContinueString(const char* pStr);
4660  void ContinueString(uint32_t n);
4661  void ContinueString(uint64_t n);
4662  void ContinueString_Pointer(const void* ptr);
4663  void EndString(const char* pStr = VMA_NULL);
4664 
4665  void WriteNumber(uint32_t n);
4666  void WriteNumber(uint64_t n);
4667  void WriteBool(bool b);
4668  void WriteNull();
4669 
4670 private:
4671  static const char* const INDENT;
4672 
4673  enum COLLECTION_TYPE
4674  {
4675  COLLECTION_TYPE_OBJECT,
4676  COLLECTION_TYPE_ARRAY,
4677  };
4678  struct StackItem
4679  {
4680  COLLECTION_TYPE type;
4681  uint32_t valueCount;
4682  bool singleLineMode;
4683  };
4684 
4685  VmaStringBuilder& m_SB;
4686  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4687  bool m_InsideString;
4688 
4689  void BeginValue(bool isString);
4690  void WriteIndent(bool oneLess = false);
4691 };
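// Minimal usage sketch (sb is an existing VmaStringBuilder, allocationCallbacks is
// the caller's VkAllocationCallbacks pointer):
//   VmaJsonWriter json(allocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("UsedBytes"); // key
//   json.WriteNumber(1024u);       // value
//   json.EndObject();
// which produces, modulo indentation: { "UsedBytes": 1024 }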
4692 
4693 const char* const VmaJsonWriter::INDENT = " ";
4694 
4695 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4696  m_SB(sb),
4697  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4698  m_InsideString(false)
4699 {
4700 }
4701 
4702 VmaJsonWriter::~VmaJsonWriter()
4703 {
4704  VMA_ASSERT(!m_InsideString);
4705  VMA_ASSERT(m_Stack.empty());
4706 }
4707 
4708 void VmaJsonWriter::BeginObject(bool singleLine)
4709 {
4710  VMA_ASSERT(!m_InsideString);
4711 
4712  BeginValue(false);
4713  m_SB.Add('{');
4714 
4715  StackItem item;
4716  item.type = COLLECTION_TYPE_OBJECT;
4717  item.valueCount = 0;
4718  item.singleLineMode = singleLine;
4719  m_Stack.push_back(item);
4720 }
4721 
4722 void VmaJsonWriter::EndObject()
4723 {
4724  VMA_ASSERT(!m_InsideString);
4725 
4726  WriteIndent(true);
4727  m_SB.Add('}');
4728 
4729  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4730  m_Stack.pop_back();
4731 }
4732 
4733 void VmaJsonWriter::BeginArray(bool singleLine)
4734 {
4735  VMA_ASSERT(!m_InsideString);
4736 
4737  BeginValue(false);
4738  m_SB.Add('[');
4739 
4740  StackItem item;
4741  item.type = COLLECTION_TYPE_ARRAY;
4742  item.valueCount = 0;
4743  item.singleLineMode = singleLine;
4744  m_Stack.push_back(item);
4745 }
4746 
4747 void VmaJsonWriter::EndArray()
4748 {
4749  VMA_ASSERT(!m_InsideString);
4750 
4751  WriteIndent(true);
4752  m_SB.Add(']');
4753 
4754  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4755  m_Stack.pop_back();
4756 }
4757 
4758 void VmaJsonWriter::WriteString(const char* pStr)
4759 {
4760  BeginString(pStr);
4761  EndString();
4762 }
4763 
4764 void VmaJsonWriter::BeginString(const char* pStr)
4765 {
4766  VMA_ASSERT(!m_InsideString);
4767 
4768  BeginValue(true);
4769  m_SB.Add('"');
4770  m_InsideString = true;
4771  if(pStr != VMA_NULL && pStr[0] != '\0')
4772  {
4773  ContinueString(pStr);
4774  }
4775 }
4776 
4777 void VmaJsonWriter::ContinueString(const char* pStr)
4778 {
4779  VMA_ASSERT(m_InsideString);
4780 
4781  const size_t strLen = strlen(pStr);
4782  for(size_t i = 0; i < strLen; ++i)
4783  {
4784  char ch = pStr[i];
4785  if(ch == '\\')
4786  {
4787  m_SB.Add("\\\\");
4788  }
4789  else if(ch == '"')
4790  {
4791  m_SB.Add("\\\"");
4792  }
4793  else if(ch >= 32)
4794  {
4795  m_SB.Add(ch);
4796  }
4797  else switch(ch)
4798  {
4799  case '\b':
4800  m_SB.Add("\\b");
4801  break;
4802  case '\f':
4803  m_SB.Add("\\f");
4804  break;
4805  case '\n':
4806  m_SB.Add("\\n");
4807  break;
4808  case '\r':
4809  m_SB.Add("\\r");
4810  break;
4811  case '\t':
4812  m_SB.Add("\\t");
4813  break;
4814  default:
4815  VMA_ASSERT(0 && "Character not currently supported.");
4816  break;
4817  }
4818  }
4819 }
4820 
4821 void VmaJsonWriter::ContinueString(uint32_t n)
4822 {
4823  VMA_ASSERT(m_InsideString);
4824  m_SB.AddNumber(n);
4825 }
4826 
4827 void VmaJsonWriter::ContinueString(uint64_t n)
4828 {
4829  VMA_ASSERT(m_InsideString);
4830  m_SB.AddNumber(n);
4831 }
4832 
4833 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4834 {
4835  VMA_ASSERT(m_InsideString);
4836  m_SB.AddPointer(ptr);
4837 }
4838 
4839 void VmaJsonWriter::EndString(const char* pStr)
4840 {
4841  VMA_ASSERT(m_InsideString);
4842  if(pStr != VMA_NULL && pStr[0] != '\0')
4843  {
4844  ContinueString(pStr);
4845  }
4846  m_SB.Add('"');
4847  m_InsideString = false;
4848 }
4849 
4850 void VmaJsonWriter::WriteNumber(uint32_t n)
4851 {
4852  VMA_ASSERT(!m_InsideString);
4853  BeginValue(false);
4854  m_SB.AddNumber(n);
4855 }
4856 
4857 void VmaJsonWriter::WriteNumber(uint64_t n)
4858 {
4859  VMA_ASSERT(!m_InsideString);
4860  BeginValue(false);
4861  m_SB.AddNumber(n);
4862 }
4863 
4864 void VmaJsonWriter::WriteBool(bool b)
4865 {
4866  VMA_ASSERT(!m_InsideString);
4867  BeginValue(false);
4868  m_SB.Add(b ? "true" : "false");
4869 }
4870 
4871 void VmaJsonWriter::WriteNull()
4872 {
4873  VMA_ASSERT(!m_InsideString);
4874  BeginValue(false);
4875  m_SB.Add("null");
4876 }
4877 
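// Inside an object, values at even positions are keys (and must therefore be
// strings); odd positions are the corresponding values. BeginValue() enforces
// this and emits the ": " and ", " separators accordingly.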
4878 void VmaJsonWriter::BeginValue(bool isString)
4879 {
4880  if(!m_Stack.empty())
4881  {
4882  StackItem& currItem = m_Stack.back();
4883  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4884  currItem.valueCount % 2 == 0)
4885  {
4886  VMA_ASSERT(isString);
4887  }
4888 
4889  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4890  currItem.valueCount % 2 != 0)
4891  {
4892  m_SB.Add(": ");
4893  }
4894  else if(currItem.valueCount > 0)
4895  {
4896  m_SB.Add(", ");
4897  WriteIndent();
4898  }
4899  else
4900  {
4901  WriteIndent();
4902  }
4903  ++currItem.valueCount;
4904  }
4905 }
4906 
4907 void VmaJsonWriter::WriteIndent(bool oneLess)
4908 {
4909  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4910  {
4911  m_SB.AddNewLine();
4912 
4913  size_t count = m_Stack.size();
4914  if(count > 0 && oneLess)
4915  {
4916  --count;
4917  }
4918  for(size_t i = 0; i < count; ++i)
4919  {
4920  m_SB.Add(INDENT);
4921  }
4922  }
4923 }
4924 
4925 #endif // #if VMA_STATS_STRING_ENABLED
4926 
4927 ////////////////////////////////////////////////////////////////////////////////
4928 
4929 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4930 {
4931  if(IsUserDataString())
4932  {
4933  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4934 
4935  FreeUserDataString(hAllocator);
4936 
4937  if(pUserData != VMA_NULL)
4938  {
4939  const char* const newStrSrc = (char*)pUserData;
4940  const size_t newStrLen = strlen(newStrSrc);
4941  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4942  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4943  m_pUserData = newStrDst;
4944  }
4945  }
4946  else
4947  {
4948  m_pUserData = pUserData;
4949  }
4950 }
4951 
4952 void VmaAllocation_T::ChangeBlockAllocation(
4953  VmaAllocator hAllocator,
4954  VmaDeviceMemoryBlock* block,
4955  VkDeviceSize offset)
4956 {
4957  VMA_ASSERT(block != VMA_NULL);
4958  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4959 
4960  // Move mapping reference counter from old block to new block.
4961  if(block != m_BlockAllocation.m_Block)
4962  {
4963  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4964  if(IsPersistentMap())
4965  ++mapRefCount;
4966  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4967  block->Map(hAllocator, mapRefCount, VMA_NULL);
4968  }
4969 
4970  m_BlockAllocation.m_Block = block;
4971  m_BlockAllocation.m_Offset = offset;
4972 }
4973 
4974 VkDeviceSize VmaAllocation_T::GetOffset() const
4975 {
4976  switch(m_Type)
4977  {
4978  case ALLOCATION_TYPE_BLOCK:
4979  return m_BlockAllocation.m_Offset;
4980  case ALLOCATION_TYPE_DEDICATED:
4981  return 0;
4982  default:
4983  VMA_ASSERT(0);
4984  return 0;
4985  }
4986 }
4987 
4988 VkDeviceMemory VmaAllocation_T::GetMemory() const
4989 {
4990  switch(m_Type)
4991  {
4992  case ALLOCATION_TYPE_BLOCK:
4993  return m_BlockAllocation.m_Block->GetDeviceMemory();
4994  case ALLOCATION_TYPE_DEDICATED:
4995  return m_DedicatedAllocation.m_hMemory;
4996  default:
4997  VMA_ASSERT(0);
4998  return VK_NULL_HANDLE;
4999  }
5000 }
5001 
5002 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5003 {
5004  switch(m_Type)
5005  {
5006  case ALLOCATION_TYPE_BLOCK:
5007  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5008  case ALLOCATION_TYPE_DEDICATED:
5009  return m_DedicatedAllocation.m_MemoryTypeIndex;
5010  default:
5011  VMA_ASSERT(0);
5012  return UINT32_MAX;
5013  }
5014 }
5015 
5016 void* VmaAllocation_T::GetMappedData() const
5017 {
5018  switch(m_Type)
5019  {
5020  case ALLOCATION_TYPE_BLOCK:
5021  if(m_MapCount != 0)
5022  {
5023  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5024  VMA_ASSERT(pBlockData != VMA_NULL);
5025  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5026  }
5027  else
5028  {
5029  return VMA_NULL;
5030  }
5031  break;
5032  case ALLOCATION_TYPE_DEDICATED:
5033  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5034  return m_DedicatedAllocation.m_pMappedData;
5035  default:
5036  VMA_ASSERT(0);
5037  return VMA_NULL;
5038  }
5039 }
5040 
5041 bool VmaAllocation_T::CanBecomeLost() const
5042 {
5043  switch(m_Type)
5044  {
5045  case ALLOCATION_TYPE_BLOCK:
5046  return m_BlockAllocation.m_CanBecomeLost;
5047  case ALLOCATION_TYPE_DEDICATED:
5048  return false;
5049  default:
5050  VMA_ASSERT(0);
5051  return false;
5052  }
5053 }
5054 
5055 VmaPool VmaAllocation_T::GetPool() const
5056 {
5057  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5058  return m_BlockAllocation.m_hPool;
5059 }
5060 
5061 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5062 {
5063  VMA_ASSERT(CanBecomeLost());
5064 
5065  /*
5066  Warning: This is a carefully designed algorithm.
5067  Do not modify unless you really know what you're doing :)
5068  */
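 // The loop below is a standard lock-free compare-and-swap retry: when
 // compare_exchange_weak fails (spuriously or because another thread updated the
 // frame index), localLastUseFrameIndex is refreshed with the current value and
 // the conditions are re-evaluated.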
5069  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
5070  for(;;)
5071  {
5072  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
5073  {
5074  VMA_ASSERT(0);
5075  return false;
5076  }
5077  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
5078  {
5079  return false;
5080  }
5081  else // Last use time earlier than current time.
5082  {
5083  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
5084  {
5085  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
5086  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
5087  return true;
5088  }
5089  }
5090  }
5091 }
5092 
5093 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
5094 {
5095  VMA_ASSERT(IsUserDataString());
5096  if(m_pUserData != VMA_NULL)
5097  {
5098  char* const oldStr = (char*)m_pUserData;
5099  const size_t oldStrLen = strlen(oldStr);
5100  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
5101  m_pUserData = VMA_NULL;
5102  }
5103 }
5104 
5105 void VmaAllocation_T::BlockAllocMap()
5106 {
5107  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5108 
5109  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5110  {
5111  ++m_MapCount;
5112  }
5113  else
5114  {
5115  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
5116  }
5117 }
5118 
5119 void VmaAllocation_T::BlockAllocUnmap()
5120 {
5121  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5122 
5123  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5124  {
5125  --m_MapCount;
5126  }
5127  else
5128  {
5129  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
5130  }
5131 }
5132 
5133 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
5134 {
5135  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5136 
5137  if(m_MapCount != 0)
5138  {
5139  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5140  {
5141  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
5142  *ppData = m_DedicatedAllocation.m_pMappedData;
5143  ++m_MapCount;
5144  return VK_SUCCESS;
5145  }
5146  else
5147  {
5148  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
5149  return VK_ERROR_MEMORY_MAP_FAILED;
5150  }
5151  }
5152  else
5153  {
5154  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5155  hAllocator->m_hDevice,
5156  m_DedicatedAllocation.m_hMemory,
5157  0, // offset
5158  VK_WHOLE_SIZE,
5159  0, // flags
5160  ppData);
5161  if(result == VK_SUCCESS)
5162  {
5163  m_DedicatedAllocation.m_pMappedData = *ppData;
5164  m_MapCount = 1;
5165  }
5166  return result;
5167  }
5168 }
5169 
5170 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
5171 {
5172  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5173 
5174  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5175  {
5176  --m_MapCount;
5177  if(m_MapCount == 0)
5178  {
5179  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
5180  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
5181  hAllocator->m_hDevice,
5182  m_DedicatedAllocation.m_hMemory);
5183  }
5184  }
5185  else
5186  {
5187  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
5188  }
5189 }
5190 
5191 #if VMA_STATS_STRING_ENABLED
5192 
5193 // Corresponds to values of enum VmaSuballocationType.
5194 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
5195  "FREE",
5196  "UNKNOWN",
5197  "BUFFER",
5198  "IMAGE_UNKNOWN",
5199  "IMAGE_LINEAR",
5200  "IMAGE_OPTIMAL",
5201 };
5202 
5203 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
5204 {
5205  json.BeginObject();
5206 
5207  json.WriteString("Blocks");
5208  json.WriteNumber(stat.blockCount);
5209 
5210  json.WriteString("Allocations");
5211  json.WriteNumber(stat.allocationCount);
5212 
5213  json.WriteString("UnusedRanges");
5214  json.WriteNumber(stat.unusedRangeCount);
5215 
5216  json.WriteString("UsedBytes");
5217  json.WriteNumber(stat.usedBytes);
5218 
5219  json.WriteString("UnusedBytes");
5220  json.WriteNumber(stat.unusedBytes);
5221 
5222  if(stat.allocationCount > 1)
5223  {
5224  json.WriteString("AllocationSize");
5225  json.BeginObject(true);
5226  json.WriteString("Min");
5227  json.WriteNumber(stat.allocationSizeMin);
5228  json.WriteString("Avg");
5229  json.WriteNumber(stat.allocationSizeAvg);
5230  json.WriteString("Max");
5231  json.WriteNumber(stat.allocationSizeMax);
5232  json.EndObject();
5233  }
5234 
5235  if(stat.unusedRangeCount > 1)
5236  {
5237  json.WriteString("UnusedRangeSize");
5238  json.BeginObject(true);
5239  json.WriteString("Min");
5240  json.WriteNumber(stat.unusedRangeSizeMin);
5241  json.WriteString("Avg");
5242  json.WriteNumber(stat.unusedRangeSizeAvg);
5243  json.WriteString("Max");
5244  json.WriteNumber(stat.unusedRangeSizeMax);
5245  json.EndObject();
5246  }
5247 
5248  json.EndObject();
5249 }
5250 
5251 #endif // #if VMA_STATS_STRING_ENABLED
5252 
5253 struct VmaSuballocationItemSizeLess
5254 {
5255  bool operator()(
5256  const VmaSuballocationList::iterator lhs,
5257  const VmaSuballocationList::iterator rhs) const
5258  {
5259  return lhs->size < rhs->size;
5260  }
5261  bool operator()(
5262  const VmaSuballocationList::iterator lhs,
5263  VkDeviceSize rhsSize) const
5264  {
5265  return lhs->size < rhsSize;
5266  }
5267 };
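// The second overload compares an item directly against a VkDeviceSize, which lets
// VmaBinaryFindFirstNotLess() binary-search m_FreeSuballocationsBySize for a size
// without constructing a dummy list element.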
5268 
5269 ////////////////////////////////////////////////////////////////////////////////
5270 // class VmaBlockMetadata
5271 
5272 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5273  m_Size(0),
5274  m_FreeCount(0),
5275  m_SumFreeSize(0),
5276  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5277  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5278 {
5279 }
5280 
5281 VmaBlockMetadata::~VmaBlockMetadata()
5282 {
5283 }
5284 
5285 void VmaBlockMetadata::Init(VkDeviceSize size)
5286 {
5287  m_Size = size;
5288  m_FreeCount = 1;
5289  m_SumFreeSize = size;
5290 
5291  VmaSuballocation suballoc = {};
5292  suballoc.offset = 0;
5293  suballoc.size = size;
5294  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5295  suballoc.hAllocation = VK_NULL_HANDLE;
5296 
5297  m_Suballocations.push_back(suballoc);
5298  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5299  --suballocItem;
5300  m_FreeSuballocationsBySize.push_back(suballocItem);
5301 }
5302 
5303 bool VmaBlockMetadata::Validate() const
5304 {
5305  if(m_Suballocations.empty())
5306  {
5307  return false;
5308  }
5309 
5310  // Expected offset of new suballocation as calculated from previous ones.
5311  VkDeviceSize calculatedOffset = 0;
5312  // Expected number of free suballocations as calculated from traversing their list.
5313  uint32_t calculatedFreeCount = 0;
5314  // Expected sum size of free suballocations as calculated from traversing their list.
5315  VkDeviceSize calculatedSumFreeSize = 0;
5316  // Expected number of free suballocations that should be registered in
5317  // m_FreeSuballocationsBySize calculated from traversing their list.
5318  size_t freeSuballocationsToRegister = 0;
5319  // True if previous visited suballocation was free.
5320  bool prevFree = false;
5321 
5322  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5323  suballocItem != m_Suballocations.cend();
5324  ++suballocItem)
5325  {
5326  const VmaSuballocation& subAlloc = *suballocItem;
5327 
5328  // Actual offset of this suballocation doesn't match expected one.
5329  if(subAlloc.offset != calculatedOffset)
5330  {
5331  return false;
5332  }
5333 
5334  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5335  // Two adjacent free suballocations are invalid. They should be merged.
5336  if(prevFree && currFree)
5337  {
5338  return false;
5339  }
5340 
5341  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5342  {
5343  return false;
5344  }
5345 
5346  if(currFree)
5347  {
5348  calculatedSumFreeSize += subAlloc.size;
5349  ++calculatedFreeCount;
5350  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5351  {
5352  ++freeSuballocationsToRegister;
5353  }
5354  }
5355  else
5356  {
5357  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5358  {
5359  return false;
5360  }
5361  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5362  {
5363  return false;
5364  }
5365  }
5366 
5367  calculatedOffset += subAlloc.size;
5368  prevFree = currFree;
5369  }
5370 
5371  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5372  // match expected one.
5373  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5374  {
5375  return false;
5376  }
5377 
5378  VkDeviceSize lastSize = 0;
5379  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5380  {
5381  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5382 
5383  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5384  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5385  {
5386  return false;
5387  }
5388  // They must be sorted by size ascending.
5389  if(suballocItem->size < lastSize)
5390  {
5391  return false;
5392  }
5393 
5394  lastSize = suballocItem->size;
5395  }
5396 
5397  // Check if totals match calculated values.
5398  if(!ValidateFreeSuballocationList() ||
5399  (calculatedOffset != m_Size) ||
5400  (calculatedSumFreeSize != m_SumFreeSize) ||
5401  (calculatedFreeCount != m_FreeCount))
5402  {
5403  return false;
5404  }
5405 
5406  return true;
5407 }
5408 
5409 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5410 {
5411  if(!m_FreeSuballocationsBySize.empty())
5412  {
5413  return m_FreeSuballocationsBySize.back()->size;
5414  }
5415  else
5416  {
5417  return 0;
5418  }
5419 }
5420 
5421 bool VmaBlockMetadata::IsEmpty() const
5422 {
5423  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5424 }
5425 
5426 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5427 {
5428  outInfo.blockCount = 1;
5429 
5430  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5431  outInfo.allocationCount = rangeCount - m_FreeCount;
5432  outInfo.unusedRangeCount = m_FreeCount;
5433 
5434  outInfo.unusedBytes = m_SumFreeSize;
5435  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5436 
5437  outInfo.allocationSizeMin = UINT64_MAX;
5438  outInfo.allocationSizeMax = 0;
5439  outInfo.unusedRangeSizeMin = UINT64_MAX;
5440  outInfo.unusedRangeSizeMax = 0;
5441 
5442  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5443  suballocItem != m_Suballocations.cend();
5444  ++suballocItem)
5445  {
5446  const VmaSuballocation& suballoc = *suballocItem;
5447  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5448  {
5449  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5450  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5451  }
5452  else
5453  {
5454  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5455  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5456  }
5457  }
5458 }
5459 
5460 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5461 {
5462  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5463 
5464  inoutStats.size += m_Size;
5465  inoutStats.unusedSize += m_SumFreeSize;
5466  inoutStats.allocationCount += rangeCount - m_FreeCount;
5467  inoutStats.unusedRangeCount += m_FreeCount;
5468  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5469 }
5470 
5471 #if VMA_STATS_STRING_ENABLED
5472 
5473 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5474 {
5475  json.BeginObject();
5476 
5477  json.WriteString("TotalBytes");
5478  json.WriteNumber(m_Size);
5479 
5480  json.WriteString("UnusedBytes");
5481  json.WriteNumber(m_SumFreeSize);
5482 
5483  json.WriteString("Allocations");
5484  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5485 
5486  json.WriteString("UnusedRanges");
5487  json.WriteNumber(m_FreeCount);
5488 
5489  json.WriteString("Suballocations");
5490  json.BeginArray();
5491  size_t i = 0;
5492  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5493  suballocItem != m_Suballocations.cend();
5494  ++suballocItem, ++i)
5495  {
5496  json.BeginObject(true);
5497 
5498  json.WriteString("Type");
5499  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5500 
5501  json.WriteString("Size");
5502  json.WriteNumber(suballocItem->size);
5503 
5504  json.WriteString("Offset");
5505  json.WriteNumber(suballocItem->offset);
5506 
5507  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5508  {
5509  const void* pUserData = suballocItem->hAllocation->GetUserData();
5510  if(pUserData != VMA_NULL)
5511  {
5512  json.WriteString("UserData");
5513  if(suballocItem->hAllocation->IsUserDataString())
5514  {
5515  json.WriteString((const char*)pUserData);
5516  }
5517  else
5518  {
5519  json.BeginString();
5520  json.ContinueString_Pointer(pUserData);
5521  json.EndString();
5522  }
5523  }
5524  }
5525 
5526  json.EndObject();
5527  }
5528  json.EndArray();
5529 
5530  json.EndObject();
5531 }
5532 
5533 #endif // #if VMA_STATS_STRING_ENABLED
5534 
5535 /*
5536 How many suitable free suballocations to analyze before choosing best one.
5537 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
5538  be chosen.
5539 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5540  suballocations will be analyzed and the best one will be chosen.
5541 - Any other value is also acceptable.
5542 */
5543 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5544 
5545 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5546 {
5547  VMA_ASSERT(IsEmpty());
5548  pAllocationRequest->offset = 0;
5549  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5550  pAllocationRequest->sumItemSize = 0;
5551  pAllocationRequest->item = m_Suballocations.begin();
5552  pAllocationRequest->itemsToMakeLostCount = 0;
5553 }
5554 
5555 bool VmaBlockMetadata::CreateAllocationRequest(
5556  uint32_t currentFrameIndex,
5557  uint32_t frameInUseCount,
5558  VkDeviceSize bufferImageGranularity,
5559  VkDeviceSize allocSize,
5560  VkDeviceSize allocAlignment,
5561  VmaSuballocationType allocType,
5562  bool canMakeOtherLost,
5563  VmaAllocationRequest* pAllocationRequest)
5564 {
5565  VMA_ASSERT(allocSize > 0);
5566  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5567  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5568  VMA_HEAVY_ASSERT(Validate());
5569 
5570  // There is not enough total free space in this block to fulfill the request: early return.
5571  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5572  {
5573  return false;
5574  }
5575 
5576  // New algorithm: efficiently search m_FreeSuballocationsBySize.
5577  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5578  if(freeSuballocCount > 0)
5579  {
5580  if(VMA_BEST_FIT)
5581  {
5582  // Find first free suballocation with size not less than allocSize.
5583  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5584  m_FreeSuballocationsBySize.data(),
5585  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5586  allocSize,
5587  VmaSuballocationItemSizeLess());
5588  size_t index = it - m_FreeSuballocationsBySize.data();
5589  for(; index < freeSuballocCount; ++index)
5590  {
5591  if(CheckAllocation(
5592  currentFrameIndex,
5593  frameInUseCount,
5594  bufferImageGranularity,
5595  allocSize,
5596  allocAlignment,
5597  allocType,
5598  m_FreeSuballocationsBySize[index],
5599  false, // canMakeOtherLost
5600  &pAllocationRequest->offset,
5601  &pAllocationRequest->itemsToMakeLostCount,
5602  &pAllocationRequest->sumFreeSize,
5603  &pAllocationRequest->sumItemSize))
5604  {
5605  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5606  return true;
5607  }
5608  }
5609  }
5610  else
5611  {
5612  // Search starting from the biggest suballocations.
5613  for(size_t index = freeSuballocCount; index--; )
5614  {
5615  if(CheckAllocation(
5616  currentFrameIndex,
5617  frameInUseCount,
5618  bufferImageGranularity,
5619  allocSize,
5620  allocAlignment,
5621  allocType,
5622  m_FreeSuballocationsBySize[index],
5623  false, // canMakeOtherLost
5624  &pAllocationRequest->offset,
5625  &pAllocationRequest->itemsToMakeLostCount,
5626  &pAllocationRequest->sumFreeSize,
5627  &pAllocationRequest->sumItemSize))
5628  {
5629  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5630  return true;
5631  }
5632  }
5633  }
5634  }
5635 
5636  if(canMakeOtherLost)
5637  {
5638  // Brute-force algorithm. TODO: Come up with something better.
5639 
5640  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5641  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5642 
5643  VmaAllocationRequest tmpAllocRequest = {};
5644  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5645  suballocIt != m_Suballocations.end();
5646  ++suballocIt)
5647  {
5648  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5649  suballocIt->hAllocation->CanBecomeLost())
5650  {
5651  if(CheckAllocation(
5652  currentFrameIndex,
5653  frameInUseCount,
5654  bufferImageGranularity,
5655  allocSize,
5656  allocAlignment,
5657  allocType,
5658  suballocIt,
5659  canMakeOtherLost,
5660  &tmpAllocRequest.offset,
5661  &tmpAllocRequest.itemsToMakeLostCount,
5662  &tmpAllocRequest.sumFreeSize,
5663  &tmpAllocRequest.sumItemSize))
5664  {
5665  tmpAllocRequest.item = suballocIt;
5666 
5667  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5668  {
5669  *pAllocationRequest = tmpAllocRequest;
5670  }
5671  }
5672  }
5673  }
5674 
5675  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5676  {
5677  return true;
5678  }
5679  }
5680 
5681  return false;
5682 }
5683 
5684 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5685  uint32_t currentFrameIndex,
5686  uint32_t frameInUseCount,
5687  VmaAllocationRequest* pAllocationRequest)
5688 {
5689  while(pAllocationRequest->itemsToMakeLostCount > 0)
5690  {
5691  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5692  {
5693  ++pAllocationRequest->item;
5694  }
5695  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5696  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5697  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5698  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5699  {
5700  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5701  --pAllocationRequest->itemsToMakeLostCount;
5702  }
5703  else
5704  {
5705  return false;
5706  }
5707  }
5708 
5709  VMA_HEAVY_ASSERT(Validate());
5710  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5711  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5712 
5713  return true;
5714 }
5715 
5716 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5717 {
5718  uint32_t lostAllocationCount = 0;
5719  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5720  it != m_Suballocations.end();
5721  ++it)
5722  {
5723  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5724  it->hAllocation->CanBecomeLost() &&
5725  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5726  {
5727  it = FreeSuballocation(it);
5728  ++lostAllocationCount;
5729  }
5730  }
5731  return lostAllocationCount;
5732 }
5733 
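// Worked example of the padding logic in Alloc() below: given a free suballocation
// at offset 200 with size 1000 and a request with offset 256 for allocSize 500,
// paddingBegin = 256 - 200 = 56 and paddingEnd = 1000 - 56 - 500 = 444; both paddings
// become new FREE suballocations, so m_FreeCount changes by -1 + 2 = +1.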
5734 void VmaBlockMetadata::Alloc(
5735  const VmaAllocationRequest& request,
5736  VmaSuballocationType type,
5737  VkDeviceSize allocSize,
5738  VmaAllocation hAllocation)
5739 {
5740  VMA_ASSERT(request.item != m_Suballocations.end());
5741  VmaSuballocation& suballoc = *request.item;
5742  // The given suballocation must be free.
5743  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5744  // Given offset is inside this suballocation.
5745  VMA_ASSERT(request.offset >= suballoc.offset);
5746  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5747  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5748  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5749 
5750  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5751  // it to become used.
5752  UnregisterFreeSuballocation(request.item);
5753 
5754  suballoc.offset = request.offset;
5755  suballoc.size = allocSize;
5756  suballoc.type = type;
5757  suballoc.hAllocation = hAllocation;
5758 
5759  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5760  if(paddingEnd)
5761  {
5762  VmaSuballocation paddingSuballoc = {};
5763  paddingSuballoc.offset = request.offset + allocSize;
5764  paddingSuballoc.size = paddingEnd;
5765  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5766  VmaSuballocationList::iterator next = request.item;
5767  ++next;
5768  const VmaSuballocationList::iterator paddingEndItem =
5769  m_Suballocations.insert(next, paddingSuballoc);
5770  RegisterFreeSuballocation(paddingEndItem);
5771  }
5772 
5773  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5774  if(paddingBegin)
5775  {
5776  VmaSuballocation paddingSuballoc = {};
5777  paddingSuballoc.offset = request.offset - paddingBegin;
5778  paddingSuballoc.size = paddingBegin;
5779  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5780  const VmaSuballocationList::iterator paddingBeginItem =
5781  m_Suballocations.insert(request.item, paddingSuballoc);
5782  RegisterFreeSuballocation(paddingBeginItem);
5783  }
5784 
5785  // Update totals.
5786  m_FreeCount = m_FreeCount - 1;
5787  if(paddingBegin > 0)
5788  {
5789  ++m_FreeCount;
5790  }
5791  if(paddingEnd > 0)
5792  {
5793  ++m_FreeCount;
5794  }
5795  m_SumFreeSize -= allocSize;
5796 }
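// Worked example of the padding math above (illustrative numbers only):
// for a free suballocation at offset=100, size=200 and a request with
// request.offset=128, allocSize=64, paddingBegin = 128-100 = 28 and
// paddingEnd = 200-28-64 = 108. One free suballocation of size 28 is
// inserted before the item and one of size 108 after it, so m_FreeCount
// changes by -1+2 and m_SumFreeSize drops by exactly allocSize.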
5797 
5798 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5799 {
5800  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5801  suballocItem != m_Suballocations.end();
5802  ++suballocItem)
5803  {
5804  VmaSuballocation& suballoc = *suballocItem;
5805  if(suballoc.hAllocation == allocation)
5806  {
5807  FreeSuballocation(suballocItem);
5808  VMA_HEAVY_ASSERT(Validate());
5809  return;
5810  }
5811  }
5812  VMA_ASSERT(0 && "Not found!");
5813 }
5814 
5815 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5816 {
5817  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5818  suballocItem != m_Suballocations.end();
5819  ++suballocItem)
5820  {
5821  VmaSuballocation& suballoc = *suballocItem;
5822  if(suballoc.offset == offset)
5823  {
5824  FreeSuballocation(suballocItem);
5825  return;
5826  }
5827  }
5828  VMA_ASSERT(0 && "Not found!");
5829 }
5830 
5831 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5832 {
5833  VkDeviceSize lastSize = 0;
5834  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5835  {
5836  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5837 
5838  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5839  {
5840  VMA_ASSERT(0);
5841  return false;
5842  }
5843  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5844  {
5845  VMA_ASSERT(0);
5846  return false;
5847  }
5848  if(it->size < lastSize)
5849  {
5850  VMA_ASSERT(0);
5851  return false;
5852  }
5853 
5854  lastSize = it->size;
5855  }
5856  return true;
5857 }
5858 
5859 bool VmaBlockMetadata::CheckAllocation(
5860  uint32_t currentFrameIndex,
5861  uint32_t frameInUseCount,
5862  VkDeviceSize bufferImageGranularity,
5863  VkDeviceSize allocSize,
5864  VkDeviceSize allocAlignment,
5865  VmaSuballocationType allocType,
5866  VmaSuballocationList::const_iterator suballocItem,
5867  bool canMakeOtherLost,
5868  VkDeviceSize* pOffset,
5869  size_t* itemsToMakeLostCount,
5870  VkDeviceSize* pSumFreeSize,
5871  VkDeviceSize* pSumItemSize) const
5872 {
5873  VMA_ASSERT(allocSize > 0);
5874  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5875  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5876  VMA_ASSERT(pOffset != VMA_NULL);
5877 
5878  *itemsToMakeLostCount = 0;
5879  *pSumFreeSize = 0;
5880  *pSumItemSize = 0;
5881 
5882  if(canMakeOtherLost)
5883  {
5884  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5885  {
5886  *pSumFreeSize = suballocItem->size;
5887  }
5888  else
5889  {
5890  if(suballocItem->hAllocation->CanBecomeLost() &&
5891  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5892  {
5893  ++*itemsToMakeLostCount;
5894  *pSumItemSize = suballocItem->size;
5895  }
5896  else
5897  {
5898  return false;
5899  }
5900  }
5901 
5902  // Remaining size is too small for this request: Early return.
5903  if(m_Size - suballocItem->offset < allocSize)
5904  {
5905  return false;
5906  }
5907 
5908  // Start from offset equal to beginning of this suballocation.
5909  *pOffset = suballocItem->offset;
5910 
5911  // Apply VMA_DEBUG_MARGIN at the beginning.
5912  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5913  {
5914  *pOffset += VMA_DEBUG_MARGIN;
5915  }
5916 
5917  // Apply alignment.
5918  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5919  *pOffset = VmaAlignUp(*pOffset, alignment);
5920 
5921  // Check previous suballocations for BufferImageGranularity conflicts.
5922  // Make bigger alignment if necessary.
5923  if(bufferImageGranularity > 1)
5924  {
5925  bool bufferImageGranularityConflict = false;
5926  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5927  while(prevSuballocItem != m_Suballocations.cbegin())
5928  {
5929  --prevSuballocItem;
5930  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5931  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5932  {
5933  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5934  {
5935  bufferImageGranularityConflict = true;
5936  break;
5937  }
5938  }
5939  else
5940  // Already on previous page.
5941  break;
5942  }
5943  if(bufferImageGranularityConflict)
5944  {
5945  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5946  }
5947  }
5948 
5949  // Now that we have final *pOffset, check if we are past suballocItem.
5950  // If yes, return false - this function should be called for another suballocItem as starting point.
5951  if(*pOffset >= suballocItem->offset + suballocItem->size)
5952  {
5953  return false;
5954  }
5955 
5956  // Calculate padding at the beginning based on current offset.
5957  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5958 
5959  // Calculate required margin at the end if this is not last suballocation.
5960  VmaSuballocationList::const_iterator next = suballocItem;
5961  ++next;
5962  const VkDeviceSize requiredEndMargin =
5963  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5964 
5965  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5966  // Another early return check.
5967  if(suballocItem->offset + totalSize > m_Size)
5968  {
5969  return false;
5970  }
5971 
5972  // Advance lastSuballocItem until desired size is reached.
5973  // Update itemsToMakeLostCount.
5974  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5975  if(totalSize > suballocItem->size)
5976  {
5977  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5978  while(remainingSize > 0)
5979  {
5980  ++lastSuballocItem;
5981  if(lastSuballocItem == m_Suballocations.cend())
5982  {
5983  return false;
5984  }
5985  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5986  {
5987  *pSumFreeSize += lastSuballocItem->size;
5988  }
5989  else
5990  {
5991  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5992  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5993  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5994  {
5995  ++*itemsToMakeLostCount;
5996  *pSumItemSize += lastSuballocItem->size;
5997  }
5998  else
5999  {
6000  return false;
6001  }
6002  }
6003  remainingSize = (lastSuballocItem->size < remainingSize) ?
6004  remainingSize - lastSuballocItem->size : 0;
6005  }
6006  }
6007 
6008  // Check next suballocations for BufferImageGranularity conflicts.
6009  // If conflict exists, we must mark more allocations lost or fail.
6010  if(bufferImageGranularity > 1)
6011  {
6012  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
6013  ++nextSuballocItem;
6014  while(nextSuballocItem != m_Suballocations.cend())
6015  {
6016  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6017  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6018  {
6019  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6020  {
6021  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
6022  if(nextSuballoc.hAllocation->CanBecomeLost() &&
6023  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6024  {
6025  ++*itemsToMakeLostCount;
6026  }
6027  else
6028  {
6029  return false;
6030  }
6031  }
6032  }
6033  else
6034  {
6035  // Already on next page.
6036  break;
6037  }
6038  ++nextSuballocItem;
6039  }
6040  }
6041  }
6042  else
6043  {
6044  const VmaSuballocation& suballoc = *suballocItem;
6045  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
6046 
6047  *pSumFreeSize = suballoc.size;
6048 
6049  // Size of this suballocation is too small for this request: Early return.
6050  if(suballoc.size < allocSize)
6051  {
6052  return false;
6053  }
6054 
6055  // Start from offset equal to beginning of this suballocation.
6056  *pOffset = suballoc.offset;
6057 
6058  // Apply VMA_DEBUG_MARGIN at the beginning.
6059  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
6060  {
6061  *pOffset += VMA_DEBUG_MARGIN;
6062  }
6063 
6064  // Apply alignment.
6065  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
6066  *pOffset = VmaAlignUp(*pOffset, alignment);
6067 
6068  // Check previous suballocations for BufferImageGranularity conflicts.
6069  // Make bigger alignment if necessary.
6070  if(bufferImageGranularity > 1)
6071  {
6072  bool bufferImageGranularityConflict = false;
6073  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6074  while(prevSuballocItem != m_Suballocations.cbegin())
6075  {
6076  --prevSuballocItem;
6077  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6078  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6079  {
6080  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6081  {
6082  bufferImageGranularityConflict = true;
6083  break;
6084  }
6085  }
6086  else
6087  // Already on previous page.
6088  break;
6089  }
6090  if(bufferImageGranularityConflict)
6091  {
6092  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6093  }
6094  }
6095 
6096  // Calculate padding at the beginning based on current offset.
6097  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
6098 
6099  // Calculate required margin at the end if this is not last suballocation.
6100  VmaSuballocationList::const_iterator next = suballocItem;
6101  ++next;
6102  const VkDeviceSize requiredEndMargin =
6103  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
6104 
6105  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
6106  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
6107  {
6108  return false;
6109  }
6110 
6111  // Check next suballocations for BufferImageGranularity conflicts.
6112  // If conflict exists, allocation cannot be made here.
6113  if(bufferImageGranularity > 1)
6114  {
6115  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
6116  ++nextSuballocItem;
6117  while(nextSuballocItem != m_Suballocations.cend())
6118  {
6119  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6120  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6121  {
6122  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6123  {
6124  return false;
6125  }
6126  }
6127  else
6128  {
6129  // Already on next page.
6130  break;
6131  }
6132  ++nextSuballocItem;
6133  }
6134  }
6135  }
6136 
6137  // All tests passed: Success. pOffset is already filled.
6138  return true;
6139 }
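// Worked example of the offset computation above (illustrative numbers only):
// with suballoc.offset=100 and allocAlignment=16, VmaAlignUp(100, 16) = 112.
// If bufferImageGranularity=128 and the previous suballocation ends on the
// same 128-byte "page" while holding a resource of conflicting type (linear
// vs. optimal tiling), the offset is re-aligned: VmaAlignUp(112, 128) = 128,
// so the allocation starts on a fresh page and the skipped bytes become
// paddingBegin.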
6140 
6141 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
6142 {
6143  VMA_ASSERT(item != m_Suballocations.end());
6144  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6145 
6146  VmaSuballocationList::iterator nextItem = item;
6147  ++nextItem;
6148  VMA_ASSERT(nextItem != m_Suballocations.end());
6149  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6150 
6151  item->size += nextItem->size;
6152  --m_FreeCount;
6153  m_Suballocations.erase(nextItem);
6154 }
6155 
6156 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
6157 {
6158  // Change this suballocation to be marked as free.
6159  VmaSuballocation& suballoc = *suballocItem;
6160  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6161  suballoc.hAllocation = VK_NULL_HANDLE;
6162 
6163  // Update totals.
6164  ++m_FreeCount;
6165  m_SumFreeSize += suballoc.size;
6166 
6167  // Merge with previous and/or next suballocation if it's also free.
6168  bool mergeWithNext = false;
6169  bool mergeWithPrev = false;
6170 
6171  VmaSuballocationList::iterator nextItem = suballocItem;
6172  ++nextItem;
6173  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
6174  {
6175  mergeWithNext = true;
6176  }
6177 
6178  VmaSuballocationList::iterator prevItem = suballocItem;
6179  if(suballocItem != m_Suballocations.begin())
6180  {
6181  --prevItem;
6182  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6183  {
6184  mergeWithPrev = true;
6185  }
6186  }
6187 
6188  if(mergeWithNext)
6189  {
6190  UnregisterFreeSuballocation(nextItem);
6191  MergeFreeWithNext(suballocItem);
6192  }
6193 
6194  if(mergeWithPrev)
6195  {
6196  UnregisterFreeSuballocation(prevItem);
6197  MergeFreeWithNext(prevItem);
6198  RegisterFreeSuballocation(prevItem);
6199  return prevItem;
6200  }
6201  else
6202  {
6203  RegisterFreeSuballocation(suballocItem);
6204  return suballocItem;
6205  }
6206 }
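// Note: the logic above covers all four neighbor configurations (free
// previous only, free next only, both, neither). When both neighbors are
// free, the next item is first folded into this one, then this one into the
// previous, so three list nodes collapse into a single free suballocation
// and MergeFreeWithNext() decrements m_FreeCount once per erased node.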
6207 
6208 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6209 {
6210  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6211  VMA_ASSERT(item->size > 0);
6212 
6213  // You may want to enable this validation at the beginning or at the end of
6214  // this function, depending on what you want to check.
6215  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6216 
6217  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6218  {
6219  if(m_FreeSuballocationsBySize.empty())
6220  {
6221  m_FreeSuballocationsBySize.push_back(item);
6222  }
6223  else
6224  {
6225  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6226  }
6227  }
6228 
6229  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6230 }
6231 
6232 
6233 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6234 {
6235  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6236  VMA_ASSERT(item->size > 0);
6237 
6238  // You may want to enable this validation at the beginning or at the end of
6239  // this function, depending on what you want to check.
6240  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6241 
6242  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6243  {
6244  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6245  m_FreeSuballocationsBySize.data(),
6246  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6247  item,
6248  VmaSuballocationItemSizeLess());
6249  for(size_t index = it - m_FreeSuballocationsBySize.data();
6250  index < m_FreeSuballocationsBySize.size();
6251  ++index)
6252  {
6253  if(m_FreeSuballocationsBySize[index] == item)
6254  {
6255  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6256  return;
6257  }
6258  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6259  }
6260  VMA_ASSERT(0 && "Not found.");
6261  }
6262 
6263  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6264 }
6265 
6266 ////////////////////////////////////////////////////////////////////////////////
6267 // class VmaDeviceMemoryBlock
6268 
6269 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6270  m_Metadata(hAllocator),
6271  m_MemoryTypeIndex(UINT32_MAX),
6272  m_hMemory(VK_NULL_HANDLE),
6273  m_MapCount(0),
6274  m_pMappedData(VMA_NULL)
6275 {
6276 }
6277 
6278 void VmaDeviceMemoryBlock::Init(
6279  uint32_t newMemoryTypeIndex,
6280  VkDeviceMemory newMemory,
6281  VkDeviceSize newSize)
6282 {
6283  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6284 
6285  m_MemoryTypeIndex = newMemoryTypeIndex;
6286  m_hMemory = newMemory;
6287 
6288  m_Metadata.Init(newSize);
6289 }
6290 
6291 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6292 {
6293  // This is the most important assert in the entire library.
6294  // Hitting it means you have a memory leak - some VmaAllocation objects were not freed.
6295  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6296 
6297  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6298  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6299  m_hMemory = VK_NULL_HANDLE;
6300 }
6301 
6302 bool VmaDeviceMemoryBlock::Validate() const
6303 {
6304  if((m_hMemory == VK_NULL_HANDLE) ||
6305  (m_Metadata.GetSize() == 0))
6306  {
6307  return false;
6308  }
6309 
6310  return m_Metadata.Validate();
6311 }
6312 
6313 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6314 {
6315  if(count == 0)
6316  {
6317  return VK_SUCCESS;
6318  }
6319 
6320  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6321  if(m_MapCount != 0)
6322  {
6323  m_MapCount += count;
6324  VMA_ASSERT(m_pMappedData != VMA_NULL);
6325  if(ppData != VMA_NULL)
6326  {
6327  *ppData = m_pMappedData;
6328  }
6329  return VK_SUCCESS;
6330  }
6331  else
6332  {
6333  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6334  hAllocator->m_hDevice,
6335  m_hMemory,
6336  0, // offset
6337  VK_WHOLE_SIZE,
6338  0, // flags
6339  &m_pMappedData);
6340  if(result == VK_SUCCESS)
6341  {
6342  if(ppData != VMA_NULL)
6343  {
6344  *ppData = m_pMappedData;
6345  }
6346  m_MapCount = count;
6347  }
6348  return result;
6349  }
6350 }
6351 
6352 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6353 {
6354  if(count == 0)
6355  {
6356  return;
6357  }
6358 
6359  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6360  if(m_MapCount >= count)
6361  {
6362  m_MapCount -= count;
6363  if(m_MapCount == 0)
6364  {
6365  m_pMappedData = VMA_NULL;
6366  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
6367  }
6368  }
6369  else
6370  {
6371  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6372  }
6373 }
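// A minimal sketch of the reference-counted mapping seen from the public
// API, assuming a HOST_VISIBLE allocation (function name and parameters are
// hypothetical; illustrative only):
#if 0
static void UploadData(VmaAllocator allocator, VmaAllocation alloc, const void* src, size_t size)
{
    void* pData = VMA_NULL;
    // Increments m_MapCount of the owning block (vkMapMemory on first map only).
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, src, size);
        // vkUnmapMemory is called only when the count returns to 0.
        vmaUnmapMemory(allocator, alloc);
    }
}
#endif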
6374 
6375 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
6376  const VmaAllocator hAllocator,
6377  const VmaAllocation hAllocation,
6378  VkBuffer hBuffer)
6379 {
6380  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6381  hAllocation->GetBlock() == this);
6382  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6383  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6384  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
6385  hAllocator->m_hDevice,
6386  hBuffer,
6387  m_hMemory,
6388  hAllocation->GetOffset());
6389 }
6390 
6391 VkResult VmaDeviceMemoryBlock::BindImageMemory(
6392  const VmaAllocator hAllocator,
6393  const VmaAllocation hAllocation,
6394  VkImage hImage)
6395 {
6396  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6397  hAllocation->GetBlock() == this);
6398  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6399  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6400  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
6401  hAllocator->m_hDevice,
6402  hImage,
6403  m_hMemory,
6404  hAllocation->GetOffset());
6405 }
6406 
6407 static void InitStatInfo(VmaStatInfo& outInfo)
6408 {
6409  memset(&outInfo, 0, sizeof(outInfo));
6410  outInfo.allocationSizeMin = UINT64_MAX;
6411  outInfo.unusedRangeSizeMin = UINT64_MAX;
6412 }
6413 
6414 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6415 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6416 {
6417  inoutInfo.blockCount += srcInfo.blockCount;
6418  inoutInfo.allocationCount += srcInfo.allocationCount;
6419  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6420  inoutInfo.usedBytes += srcInfo.usedBytes;
6421  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6422  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6423  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6424  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6425  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6426 }
6427 
6428 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6429 {
6430  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6431  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6432  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6433  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6434 }
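// VmaRoundDiv (defined earlier in this file) computes a rounded integer
// quotient, (x + y/2) / y, so the averages above are
// usedBytes/allocationCount and unusedBytes/unusedRangeCount rounded to the
// nearest whole byte rather than truncated.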
6435 
6436 VmaPool_T::VmaPool_T(
6437  VmaAllocator hAllocator,
6438  const VmaPoolCreateInfo& createInfo) :
6439  m_BlockVector(
6440  hAllocator,
6441  createInfo.memoryTypeIndex,
6442  createInfo.blockSize,
6443  createInfo.minBlockCount,
6444  createInfo.maxBlockCount,
6445  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6446  createInfo.frameInUseCount,
6447  true) // isCustomPool
6448 {
6449 }
6450 
6451 VmaPool_T::~VmaPool_T()
6452 {
6453 }
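// A minimal sketch of creating a custom pool through the public API (the
// function name and the 64 MiB / 8-block numbers are arbitrary placeholders;
// illustrative only):
#if 0
static VmaPool CreateExamplePool(VmaAllocator allocator, uint32_t memoryTypeIndex)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex; // E.g. from vmaFindMemoryTypeIndex().
    poolCreateInfo.blockSize = 64ull * 1024 * 1024;   // Fixed - custom pools never resize blocks.
    poolCreateInfo.minBlockCount = 1;                 // Pre-allocated by CreateMinBlocks().
    poolCreateInfo.maxBlockCount = 8;
    VmaPool pool = VK_NULL_HANDLE;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);
    return pool;
}
#endif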
6454 
6455 #if VMA_STATS_STRING_ENABLED
6456 
6457 #endif // #if VMA_STATS_STRING_ENABLED
6458 
6459 VmaBlockVector::VmaBlockVector(
6460  VmaAllocator hAllocator,
6461  uint32_t memoryTypeIndex,
6462  VkDeviceSize preferredBlockSize,
6463  size_t minBlockCount,
6464  size_t maxBlockCount,
6465  VkDeviceSize bufferImageGranularity,
6466  uint32_t frameInUseCount,
6467  bool isCustomPool) :
6468  m_hAllocator(hAllocator),
6469  m_MemoryTypeIndex(memoryTypeIndex),
6470  m_PreferredBlockSize(preferredBlockSize),
6471  m_MinBlockCount(minBlockCount),
6472  m_MaxBlockCount(maxBlockCount),
6473  m_BufferImageGranularity(bufferImageGranularity),
6474  m_FrameInUseCount(frameInUseCount),
6475  m_IsCustomPool(isCustomPool),
6476  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6477  m_HasEmptyBlock(false),
6478  m_pDefragmentator(VMA_NULL)
6479 {
6480 }
6481 
6482 VmaBlockVector::~VmaBlockVector()
6483 {
6484  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6485 
6486  for(size_t i = m_Blocks.size(); i--; )
6487  {
6488  m_Blocks[i]->Destroy(m_hAllocator);
6489  vma_delete(m_hAllocator, m_Blocks[i]);
6490  }
6491 }
6492 
6493 VkResult VmaBlockVector::CreateMinBlocks()
6494 {
6495  for(size_t i = 0; i < m_MinBlockCount; ++i)
6496  {
6497  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6498  if(res != VK_SUCCESS)
6499  {
6500  return res;
6501  }
6502  }
6503  return VK_SUCCESS;
6504 }
6505 
6506 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6507 {
6508  pStats->size = 0;
6509  pStats->unusedSize = 0;
6510  pStats->allocationCount = 0;
6511  pStats->unusedRangeCount = 0;
6512  pStats->unusedRangeSizeMax = 0;
6513 
6514  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6515 
6516  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6517  {
6518  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6519  VMA_ASSERT(pBlock);
6520  VMA_HEAVY_ASSERT(pBlock->Validate());
6521  pBlock->m_Metadata.AddPoolStats(*pStats);
6522  }
6523 }
6524 
6525 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6526 
6527 VkResult VmaBlockVector::Allocate(
6528  VmaPool hCurrentPool,
6529  uint32_t currentFrameIndex,
6530  const VkMemoryRequirements& vkMemReq,
6531  const VmaAllocationCreateInfo& createInfo,
6532  VmaSuballocationType suballocType,
6533  VmaAllocation* pAllocation)
6534 {
6535  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6536  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6537 
6538  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6539 
6540  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6541  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6542  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6543  {
6544  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6545  VMA_ASSERT(pCurrBlock);
6546  VmaAllocationRequest currRequest = {};
6547  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6548  currentFrameIndex,
6549  m_FrameInUseCount,
6550  m_BufferImageGranularity,
6551  vkMemReq.size,
6552  vkMemReq.alignment,
6553  suballocType,
6554  false, // canMakeOtherLost
6555  &currRequest))
6556  {
6557  // Allocate from pCurrBlock.
6558  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6559 
6560  if(mapped)
6561  {
6562  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6563  if(res != VK_SUCCESS)
6564  {
6565  return res;
6566  }
6567  }
6568 
6569  // We no longer have an empty block.
6570  if(pCurrBlock->m_Metadata.IsEmpty())
6571  {
6572  m_HasEmptyBlock = false;
6573  }
6574 
6575  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6576  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6577  (*pAllocation)->InitBlockAllocation(
6578  hCurrentPool,
6579  pCurrBlock,
6580  currRequest.offset,
6581  vkMemReq.alignment,
6582  vkMemReq.size,
6583  suballocType,
6584  mapped,
6585  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6586  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6587  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
6588  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6589  return VK_SUCCESS;
6590  }
6591  }
6592 
6593  const bool canCreateNewBlock =
6594  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6595  (m_Blocks.size() < m_MaxBlockCount);
6596 
6597  // 2. Try to create new block.
6598  if(canCreateNewBlock)
6599  {
6600  // Calculate optimal size for new block.
6601  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6602  uint32_t newBlockSizeShift = 0;
6603  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6604 
6605  // Allocating blocks of other sizes is allowed only in default pools.
6606  // In custom pools block size is fixed.
6607  if(m_IsCustomPool == false)
6608  {
6609  // Allocate 1/8, 1/4, 1/2 as first blocks.
6610  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6611  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6612  {
6613  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6614  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6615  {
6616  newBlockSize = smallerNewBlockSize;
6617  ++newBlockSizeShift;
6618  }
6619  else
6620  {
6621  break;
6622  }
6623  }
6624  }
6625 
6626  size_t newBlockIndex = 0;
6627  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6628  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6629  if(m_IsCustomPool == false)
6630  {
6631  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6632  {
6633  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6634  if(smallerNewBlockSize >= vkMemReq.size)
6635  {
6636  newBlockSize = smallerNewBlockSize;
6637  ++newBlockSizeShift;
6638  res = CreateBlock(newBlockSize, &newBlockIndex);
6639  }
6640  else
6641  {
6642  break;
6643  }
6644  }
6645  }
6646 
6647  if(res == VK_SUCCESS)
6648  {
6649  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6650  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6651 
6652  if(mapped)
6653  {
6654  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6655  if(res != VK_SUCCESS)
6656  {
6657  return res;
6658  }
6659  }
6660 
6661  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6662  VmaAllocationRequest allocRequest;
6663  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6664  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6665  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6666  (*pAllocation)->InitBlockAllocation(
6667  hCurrentPool,
6668  pBlock,
6669  allocRequest.offset,
6670  vkMemReq.alignment,
6671  vkMemReq.size,
6672  suballocType,
6673  mapped,
6674  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6675  VMA_HEAVY_ASSERT(pBlock->Validate());
6676  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
6677  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6678  return VK_SUCCESS;
6679  }
6680  }
6681 
6682  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6683 
6684  // 3. Try to allocate from existing blocks with making other allocations lost.
6685  if(canMakeOtherLost)
6686  {
6687  uint32_t tryIndex = 0;
6688  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6689  {
6690  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6691  VmaAllocationRequest bestRequest = {};
6692  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6693 
6694  // 1. Search existing allocations.
6695  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6696  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6697  {
6698  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6699  VMA_ASSERT(pCurrBlock);
6700  VmaAllocationRequest currRequest = {};
6701  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6702  currentFrameIndex,
6703  m_FrameInUseCount,
6704  m_BufferImageGranularity,
6705  vkMemReq.size,
6706  vkMemReq.alignment,
6707  suballocType,
6708  canMakeOtherLost,
6709  &currRequest))
6710  {
6711  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6712  if(pBestRequestBlock == VMA_NULL ||
6713  currRequestCost < bestRequestCost)
6714  {
6715  pBestRequestBlock = pCurrBlock;
6716  bestRequest = currRequest;
6717  bestRequestCost = currRequestCost;
6718 
6719  if(bestRequestCost == 0)
6720  {
6721  break;
6722  }
6723  }
6724  }
6725  }
6726 
6727  if(pBestRequestBlock != VMA_NULL)
6728  {
6729  if(mapped)
6730  {
6731  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6732  if(res != VK_SUCCESS)
6733  {
6734  return res;
6735  }
6736  }
6737 
6738  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6739  currentFrameIndex,
6740  m_FrameInUseCount,
6741  &bestRequest))
6742  {
6743  // We no longer have an empty block.
6744  if(pBestRequestBlock->m_Metadata.IsEmpty())
6745  {
6746  m_HasEmptyBlock = false;
6747  }
6748  // Allocate from this pBlock.
6749  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6750  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6751  (*pAllocation)->InitBlockAllocation(
6752  hCurrentPool,
6753  pBestRequestBlock,
6754  bestRequest.offset,
6755  vkMemReq.alignment,
6756  vkMemReq.size,
6757  suballocType,
6758  mapped,
6759  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6760  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6761  VMA_DEBUG_LOG(" Returned from existing block");
6762  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6763  return VK_SUCCESS;
6764  }
6765  // else: Some allocations must have been touched while we were here. Next try.
6766  }
6767  else
6768  {
6769  // Could not find place in any of the blocks - break outer loop.
6770  break;
6771  }
6772  }
6773  /* Maximum number of tries exceeded - a very unlikely event: many other
6774  threads are simultaneously touching the allocations, making it impossible
6775  to mark them lost while we try to allocate. */
6776  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6777  {
6778  return VK_ERROR_TOO_MANY_OBJECTS;
6779  }
6780  }
6781 
6782  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6783 }
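// The three-step strategy above (1. existing blocks, 2. new block, 3. make
// other allocations lost) is selected by flags on the public API. A minimal
// sketch (function name and sizes are hypothetical; illustrative only):
#if 0
static VkResult CreateExampleBuffer(VmaAllocator allocator, VkBuffer* pBuf, VmaAllocation* pAlloc)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT would stop after step 1;
    // VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT enables step 3.

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, pBuf, pAlloc, VMA_NULL);
}
#endif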
6784 
6785 void VmaBlockVector::Free(
6786  VmaAllocation hAllocation)
6787 {
6788  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6789 
6790  // Scope for lock.
6791  {
6792  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6793 
6794  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6795 
6796  if(hAllocation->IsPersistentMap())
6797  {
6798  pBlock->Unmap(m_hAllocator, 1);
6799  }
6800 
6801  pBlock->m_Metadata.Free(hAllocation);
6802  VMA_HEAVY_ASSERT(pBlock->Validate());
6803 
6804  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6805 
6806  // pBlock became empty after this deallocation.
6807  if(pBlock->m_Metadata.IsEmpty())
6808  {
6809  // Already have an empty block. We don't want two, so delete this one.
6810  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6811  {
6812  pBlockToDelete = pBlock;
6813  Remove(pBlock);
6814  }
6815  // We now have our first empty block.
6816  else
6817  {
6818  m_HasEmptyBlock = true;
6819  }
6820  }
6821  // pBlock didn't become empty, but we have another empty block - find and free that one.
6822  // (This is an optional heuristic.)
6823  else if(m_HasEmptyBlock)
6824  {
6825  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6826  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6827  {
6828  pBlockToDelete = pLastBlock;
6829  m_Blocks.pop_back();
6830  m_HasEmptyBlock = false;
6831  }
6832  }
6833 
6834  IncrementallySortBlocks();
6835  }
6836 
6837  // Destruction of the empty block. Deferred until this point, outside of the
6838  // mutex lock, for performance reasons.
6839  if(pBlockToDelete != VMA_NULL)
6840  {
6841  VMA_DEBUG_LOG(" Deleted empty block");
6842  pBlockToDelete->Destroy(m_hAllocator);
6843  vma_delete(m_hAllocator, pBlockToDelete);
6844  }
6845 }
6846 
6847 size_t VmaBlockVector::CalcMaxBlockSize() const
6848 {
6849  size_t result = 0;
6850  for(size_t i = m_Blocks.size(); i--; )
6851  {
6852  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6853  if(result >= m_PreferredBlockSize)
6854  {
6855  break;
6856  }
6857  }
6858  return result;
6859 }
6860 
6861 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6862 {
6863  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6864  {
6865  if(m_Blocks[blockIndex] == pBlock)
6866  {
6867  VmaVectorRemove(m_Blocks, blockIndex);
6868  return;
6869  }
6870  }
6871  VMA_ASSERT(0);
6872 }
6873 
6874 void VmaBlockVector::IncrementallySortBlocks()
6875 {
6876  // Bubble sort only until first swap.
6877  for(size_t i = 1; i < m_Blocks.size(); ++i)
6878  {
6879  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6880  {
6881  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6882  return;
6883  }
6884  }
6885 }
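// Design note: a full sort after every Free() would cost O(n log n). A
// single bubble-sort pass that stops at the first swap keeps m_Blocks only
// approximately ordered by free size, which is sufficient because
// Allocate() scans all blocks anyway and merely prefers earlier ones.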
6886 
6887 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6888 {
6889  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6890  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6891  allocInfo.allocationSize = blockSize;
6892  VkDeviceMemory mem = VK_NULL_HANDLE;
6893  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6894  if(res < 0)
6895  {
6896  return res;
6897  }
6898 
6899  // New VkDeviceMemory successfully created.
6900 
6901  // Create new block object for it.
6902  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6903  pBlock->Init(
6904  m_MemoryTypeIndex,
6905  mem,
6906  allocInfo.allocationSize);
6907 
6908  m_Blocks.push_back(pBlock);
6909  if(pNewBlockIndex != VMA_NULL)
6910  {
6911  *pNewBlockIndex = m_Blocks.size() - 1;
6912  }
6913 
6914  return VK_SUCCESS;
6915 }
6916 
6917 #if VMA_STATS_STRING_ENABLED
6918 
6919 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6920 {
6921  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6922 
6923  json.BeginObject();
6924 
6925  if(m_IsCustomPool)
6926  {
6927  json.WriteString("MemoryTypeIndex");
6928  json.WriteNumber(m_MemoryTypeIndex);
6929 
6930  json.WriteString("BlockSize");
6931  json.WriteNumber(m_PreferredBlockSize);
6932 
6933  json.WriteString("BlockCount");
6934  json.BeginObject(true);
6935  if(m_MinBlockCount > 0)
6936  {
6937  json.WriteString("Min");
6938  json.WriteNumber((uint64_t)m_MinBlockCount);
6939  }
6940  if(m_MaxBlockCount < SIZE_MAX)
6941  {
6942  json.WriteString("Max");
6943  json.WriteNumber((uint64_t)m_MaxBlockCount);
6944  }
6945  json.WriteString("Cur");
6946  json.WriteNumber((uint64_t)m_Blocks.size());
6947  json.EndObject();
6948 
6949  if(m_FrameInUseCount > 0)
6950  {
6951  json.WriteString("FrameInUseCount");
6952  json.WriteNumber(m_FrameInUseCount);
6953  }
6954  }
6955  else
6956  {
6957  json.WriteString("PreferredBlockSize");
6958  json.WriteNumber(m_PreferredBlockSize);
6959  }
6960 
6961  json.WriteString("Blocks");
6962  json.BeginArray();
6963  for(size_t i = 0; i < m_Blocks.size(); ++i)
6964  {
6965  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6966  }
6967  json.EndArray();
6968 
6969  json.EndObject();
6970 }
6971 
6972 #endif // #if VMA_STATS_STRING_ENABLED
6973 
6974 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6975  VmaAllocator hAllocator,
6976  uint32_t currentFrameIndex)
6977 {
6978  if(m_pDefragmentator == VMA_NULL)
6979  {
6980  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6981  hAllocator,
6982  this,
6983  currentFrameIndex);
6984  }
6985 
6986  return m_pDefragmentator;
6987 }
6988 
6989 VkResult VmaBlockVector::Defragment(
6990  VmaDefragmentationStats* pDefragmentationStats,
6991  VkDeviceSize& maxBytesToMove,
6992  uint32_t& maxAllocationsToMove)
6993 {
6994  if(m_pDefragmentator == VMA_NULL)
6995  {
6996  return VK_SUCCESS;
6997  }
6998 
6999  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7000 
7001  // Defragment.
7002  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
7003 
7004  // Accumulate statistics.
7005  if(pDefragmentationStats != VMA_NULL)
7006  {
7007  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
7008  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
7009  pDefragmentationStats->bytesMoved += bytesMoved;
7010  pDefragmentationStats->allocationsMoved += allocationsMoved;
7011  VMA_ASSERT(bytesMoved <= maxBytesToMove);
7012  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
7013  maxBytesToMove -= bytesMoved;
7014  maxAllocationsToMove -= allocationsMoved;
7015  }
7016 
7017  // Free empty blocks.
7018  m_HasEmptyBlock = false;
7019  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
7020  {
7021  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
7022  if(pBlock->m_Metadata.IsEmpty())
7023  {
7024  if(m_Blocks.size() > m_MinBlockCount)
7025  {
7026  if(pDefragmentationStats != VMA_NULL)
7027  {
7028  ++pDefragmentationStats->deviceMemoryBlocksFreed;
7029  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
7030  }
7031 
7032  VmaVectorRemove(m_Blocks, blockIndex);
7033  pBlock->Destroy(m_hAllocator);
7034  vma_delete(m_hAllocator, pBlock);
7035  }
7036  else
7037  {
7038  m_HasEmptyBlock = true;
7039  }
7040  }
7041  }
7042 
7043  return result;
7044 }
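// A minimal sketch of driving defragmentation from the public API (function
// name and parameters are hypothetical; illustrative only):
#if 0
static void DefragmentExample(VmaAllocator allocator, VmaAllocation* allocations, size_t allocCount)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // No byte limit.
    defragInfo.maxAllocationsToMove = UINT32_MAX; // No allocation-count limit.
    VmaDefragmentationStats stats = {};
    vmaDefragment(allocator, allocations, allocCount, VMA_NULL, &defragInfo, &stats);
    // Buffers/images bound to moved allocations must be destroyed, recreated
    // and re-bound afterwards - only the memory contents are copied here.
}
#endif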
7045 
7046 void VmaBlockVector::DestroyDefragmentator()
7047 {
7048  if(m_pDefragmentator != VMA_NULL)
7049  {
7050  vma_delete(m_hAllocator, m_pDefragmentator);
7051  m_pDefragmentator = VMA_NULL;
7052  }
7053 }
7054 
7055 void VmaBlockVector::MakePoolAllocationsLost(
7056  uint32_t currentFrameIndex,
7057  size_t* pLostAllocationCount)
7058 {
7059  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7060  size_t lostAllocationCount = 0;
7061  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7062  {
7063  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7064  VMA_ASSERT(pBlock);
7065  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
7066  }
7067  if(pLostAllocationCount != VMA_NULL)
7068  {
7069  *pLostAllocationCount = lostAllocationCount;
7070  }
7071 }
7072 
7073 void VmaBlockVector::AddStats(VmaStats* pStats)
7074 {
7075  const uint32_t memTypeIndex = m_MemoryTypeIndex;
7076  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
7077 
7078  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7079 
7080  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7081  {
7082  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7083  VMA_ASSERT(pBlock);
7084  VMA_HEAVY_ASSERT(pBlock->Validate());
7085  VmaStatInfo allocationStatInfo;
7086  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
7087  VmaAddStatInfo(pStats->total, allocationStatInfo);
7088  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7089  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7090  }
7091 }
7092 
7093 ////////////////////////////////////////////////////////////////////////////////
7094 // VmaDefragmentator members definition
7095 
7096 VmaDefragmentator::VmaDefragmentator(
7097  VmaAllocator hAllocator,
7098  VmaBlockVector* pBlockVector,
7099  uint32_t currentFrameIndex) :
7100  m_hAllocator(hAllocator),
7101  m_pBlockVector(pBlockVector),
7102  m_CurrentFrameIndex(currentFrameIndex),
7103  m_BytesMoved(0),
7104  m_AllocationsMoved(0),
7105  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
7106  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
7107 {
7108 }
7109 
7110 VmaDefragmentator::~VmaDefragmentator()
7111 {
7112  for(size_t i = m_Blocks.size(); i--; )
7113  {
7114  vma_delete(m_hAllocator, m_Blocks[i]);
7115  }
7116 }
7117 
7118 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
7119 {
7120  AllocationInfo allocInfo;
7121  allocInfo.m_hAllocation = hAlloc;
7122  allocInfo.m_pChanged = pChanged;
7123  m_Allocations.push_back(allocInfo);
7124 }
7125 
7126 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
7127 {
7128  // It has already been mapped for defragmentation.
7129  if(m_pMappedDataForDefragmentation)
7130  {
7131  *ppMappedData = m_pMappedDataForDefragmentation;
7132  return VK_SUCCESS;
7133  }
7134 
7135  // It is originally mapped.
7136  if(m_pBlock->GetMappedData())
7137  {
7138  *ppMappedData = m_pBlock->GetMappedData();
7139  return VK_SUCCESS;
7140  }
7141 
7142  // Map on first usage.
7143  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
7144  *ppMappedData = m_pMappedDataForDefragmentation;
7145  return res;
7146 }
7147 
7148 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
7149 {
7150  if(m_pMappedDataForDefragmentation != VMA_NULL)
7151  {
7152  m_pBlock->Unmap(hAllocator, 1);
7153  }
7154 }
7155 
7156 VkResult VmaDefragmentator::DefragmentRound(
7157  VkDeviceSize maxBytesToMove,
7158  uint32_t maxAllocationsToMove)
7159 {
7160  if(m_Blocks.empty())
7161  {
7162  return VK_SUCCESS;
7163  }
7164 
7165  size_t srcBlockIndex = m_Blocks.size() - 1;
7166  size_t srcAllocIndex = SIZE_MAX;
7167  for(;;)
7168  {
7169  // 1. Find next allocation to move.
7170  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
7171  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
7172  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
7173  {
7174  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
7175  {
7176  // Finished: no more allocations to process.
7177  if(srcBlockIndex == 0)
7178  {
7179  return VK_SUCCESS;
7180  }
7181  else
7182  {
7183  --srcBlockIndex;
7184  srcAllocIndex = SIZE_MAX;
7185  }
7186  }
7187  else
7188  {
7189  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
7190  }
7191  }
7192 
7193  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
7194  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
7195 
7196  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
7197  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
7198  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
7199  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
7200 
7201  // 2. Try to find new place for this allocation in preceding or current block.
7202  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
7203  {
7204  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
7205  VmaAllocationRequest dstAllocRequest;
7206  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
7207  m_CurrentFrameIndex,
7208  m_pBlockVector->GetFrameInUseCount(),
7209  m_pBlockVector->GetBufferImageGranularity(),
7210  size,
7211  alignment,
7212  suballocType,
7213  false, // canMakeOtherLost
7214  &dstAllocRequest) &&
7215  MoveMakesSense(
7216  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
7217  {
7218  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
7219 
7220  // Reached limit on number of allocations or bytes to move.
7221  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
7222  (m_BytesMoved + size > maxBytesToMove))
7223  {
7224  return VK_INCOMPLETE;
7225  }
7226 
7227  void* pDstMappedData = VMA_NULL;
7228  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
7229  if(res != VK_SUCCESS)
7230  {
7231  return res;
7232  }
7233 
7234  void* pSrcMappedData = VMA_NULL;
7235  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7236  if(res != VK_SUCCESS)
7237  {
7238  return res;
7239  }
7240 
7241  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7242  memcpy(
7243  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7244  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7245  static_cast<size_t>(size));
7246 
7247  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7248  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7249 
7250  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7251 
7252  if(allocInfo.m_pChanged != VMA_NULL)
7253  {
7254  *allocInfo.m_pChanged = VK_TRUE;
7255  }
7256 
7257  ++m_AllocationsMoved;
7258  m_BytesMoved += size;
7259 
7260  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7261 
7262  break;
7263  }
7264  }
7265 
7266  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7267 
7268  if(srcAllocIndex > 0)
7269  {
7270  --srcAllocIndex;
7271  }
7272  else
7273  {
7274  if(srcBlockIndex > 0)
7275  {
7276  --srcBlockIndex;
7277  srcAllocIndex = SIZE_MAX;
7278  }
7279  else
7280  {
7281  return VK_SUCCESS;
7282  }
7283  }
7284  }
7285 }
7286 
7287 VkResult VmaDefragmentator::Defragment(
7288  VkDeviceSize maxBytesToMove,
7289  uint32_t maxAllocationsToMove)
7290 {
7291  if(m_Allocations.empty())
7292  {
7293  return VK_SUCCESS;
7294  }
7295 
7296  // Create block info for each block.
7297  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7298  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7299  {
7300  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7301  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7302  m_Blocks.push_back(pBlockInfo);
7303  }
7304 
7305  // Sort them by m_pBlock pointer value.
7306  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7307 
7308  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
7309  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
7310  {
7311  AllocationInfo& allocInfo = m_Allocations[allocIndex];
7312  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
7313  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7314  {
7315  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7316  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7317  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7318  {
7319  (*it)->m_Allocations.push_back(allocInfo);
7320  }
7321  else
7322  {
7323  VMA_ASSERT(0);
7324  }
7325  }
7326  }
7327  m_Allocations.clear();
7328 
7329  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7330  {
7331  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7332  pBlockInfo->CalcHasNonMovableAllocations();
7333  pBlockInfo->SortAllocationsBySizeDescecnding();
7334  }
7335 
7336  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7337  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7338 
7339  // Execute defragmentation rounds (the main part).
7340  VkResult result = VK_SUCCESS;
7341  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7342  {
7343  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7344  }
7345 
7346  // Unmap blocks that were mapped for defragmentation.
7347  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7348  {
7349  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7350  }
7351 
7352  return result;
7353 }
7354 
7355 bool VmaDefragmentator::MoveMakesSense(
7356  size_t dstBlockIndex, VkDeviceSize dstOffset,
7357  size_t srcBlockIndex, VkDeviceSize srcOffset)
7358 {
7359  if(dstBlockIndex < srcBlockIndex)
7360  {
7361  return true;
7362  }
7363  if(dstBlockIndex > srcBlockIndex)
7364  {
7365  return false;
7366  }
7367  if(dstOffset < srcOffset)
7368  {
7369  return true;
7370  }
7371  return false;
7372 }
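// Equivalent formulation: the move makes sense iff the pair
// (dstBlockIndex, dstOffset) compares lexicographically less than
// (srcBlockIndex, srcOffset). Data only ever moves "left" - toward the
// front of the block list and lower offsets - which guarantees that
// defragmentation rounds terminate.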
7373 
7374 ////////////////////////////////////////////////////////////////////////////////
7375 // VmaAllocator_T
7376 
7377 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7378  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7379  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7380  m_hDevice(pCreateInfo->device),
7381  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7382  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7383  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7384  m_PreferredLargeHeapBlockSize(0),
7385  m_PhysicalDevice(pCreateInfo->physicalDevice),
7386  m_CurrentFrameIndex(0),
7387  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7388 {
7389  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7390 
7391  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7392  memset(&m_MemProps, 0, sizeof(m_MemProps));
7393  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7394 
7395  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7396  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7397 
7398  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7399  {
7400  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7401  }
7402 
7403  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7404  {
7405  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7406  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7407  }
7408 
7409  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7410 
7411  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7412  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7413 
7414  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7415  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7416 
7417  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7418  {
7419  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7420  {
7421  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7422  if(limit != VK_WHOLE_SIZE)
7423  {
7424  m_HeapSizeLimit[heapIndex] = limit;
7425  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7426  {
7427  m_MemProps.memoryHeaps[heapIndex].size = limit;
7428  }
7429  }
7430  }
7431  }
7432 
7433  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7434  {
7435  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7436 
7437  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7438  this,
7439  memTypeIndex,
7440  preferredBlockSize,
7441  0,
7442  SIZE_MAX,
7443  GetBufferImageGranularity(),
7444  pCreateInfo->frameInUseCount,
7445  false); // isCustomPool
7446  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7447  // because minBlockCount is 0.
7448  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7449  }
7450 }
7451 
7452 VmaAllocator_T::~VmaAllocator_T()
7453 {
7454  VMA_ASSERT(m_Pools.empty());
7455 
7456  for(size_t i = GetMemoryTypeCount(); i--; )
7457  {
7458  vma_delete(this, m_pDedicatedAllocations[i]);
7459  vma_delete(this, m_pBlockVectors[i]);
7460  }
7461 }
7462 
7463 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7464 {
7465 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7466  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7467  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7468  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7469  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7470  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7471  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7472  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7473  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7474  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7475  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7476  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7477  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7478  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7479  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7480  if(m_UseKhrDedicatedAllocation)
7481  {
7482  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7483  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7484  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7485  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7486  }
7487 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7488 
7489 #define VMA_COPY_IF_NOT_NULL(funcName) \
7490  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7491 
7492  if(pVulkanFunctions != VMA_NULL)
7493  {
7494  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7495  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7496  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7497  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7498  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7499  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7500  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7501  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7502  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7503  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7504  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7505  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7506  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7507  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7508  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7509  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7510  }
7511 
7512 #undef VMA_COPY_IF_NOT_NULL
7513 
7514  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7515  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7516  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7517  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7518  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7519  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7520  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7521  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7522  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7523  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7524  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7525  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7526  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7527  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7528  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7529  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7530  if(m_UseKhrDedicatedAllocation)
7531  {
7532  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7533  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7534  }
7535 }
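/*
An illustrative sketch of the client side of this import: with VMA_STATIC_VULKAN_FUNCTIONS
defined to 0, every pointer must arrive through VmaAllocatorCreateInfo::pVulkanFunctions.
`physicalDevice` and `device` are assumed to be valid handles obtained elsewhere; the two
*2KHR pointers may stay null unless the allocator is created with the KHR
dedicated-allocation flag, matching the asserts above.

    VmaVulkanFunctions funcs = {};
    funcs.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    funcs.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    funcs.vkAllocateMemory = &vkAllocateMemory;
    funcs.vkFreeMemory = &vkFreeMemory;
    funcs.vkMapMemory = &vkMapMemory;
    funcs.vkUnmapMemory = &vkUnmapMemory;
    funcs.vkBindBufferMemory = &vkBindBufferMemory;
    funcs.vkBindImageMemory = &vkBindImageMemory;
    funcs.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    funcs.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    funcs.vkCreateBuffer = &vkCreateBuffer;
    funcs.vkDestroyBuffer = &vkDestroyBuffer;
    funcs.vkCreateImage = &vkCreateImage;
    funcs.vkDestroyImage = &vkDestroyImage;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &funcs;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/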
7536 
7537 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7538 {
7539  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7540  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7541  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7542  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7543 }
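/*
A worked example of this heuristic (the exact constants are assumptions; at the time of
writing VMA_SMALL_HEAP_MAX_SIZE defaults to 1 GiB and the preferred large-heap block size
to 256 MiB): a 512 MiB heap counts as small, so its blocks are 512 / 8 = 64 MiB, while an
8 GiB device-local heap is large, so its blocks get the full preferred size of 256 MiB.
*/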
7544 
7545 VkResult VmaAllocator_T::AllocateMemoryOfType(
7546  const VkMemoryRequirements& vkMemReq,
7547  bool dedicatedAllocation,
7548  VkBuffer dedicatedBuffer,
7549  VkImage dedicatedImage,
7550  const VmaAllocationCreateInfo& createInfo,
7551  uint32_t memTypeIndex,
7552  VmaSuballocationType suballocType,
7553  VmaAllocation* pAllocation)
7554 {
7555  VMA_ASSERT(pAllocation != VMA_NULL);
7556  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7557 
7558  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7559 
7560  // If memory type is not HOST_VISIBLE, disable MAPPED.
7561  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7562  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7563  {
7564  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7565  }
7566 
7567  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7568  VMA_ASSERT(blockVector);
7569 
7570  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7571  bool preferDedicatedMemory =
7572  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7573  dedicatedAllocation ||
7574  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
7575  vkMemReq.size > preferredBlockSize / 2;
7576 
7577  if(preferDedicatedMemory &&
7578  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7579  finalCreateInfo.pool == VK_NULL_HANDLE)
7580  {
7581  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7582  }
7583 
7584  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7585  {
7586  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7587  {
7588  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7589  }
7590  else
7591  {
7592  return AllocateDedicatedMemory(
7593  vkMemReq.size,
7594  suballocType,
7595  memTypeIndex,
7596  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7597  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7598  finalCreateInfo.pUserData,
7599  dedicatedBuffer,
7600  dedicatedImage,
7601  pAllocation);
7602  }
7603  }
7604  else
7605  {
7606  VkResult res = blockVector->Allocate(
7607  VK_NULL_HANDLE, // hCurrentPool
7608  m_CurrentFrameIndex.load(),
7609  vkMemReq,
7610  finalCreateInfo,
7611  suballocType,
7612  pAllocation);
7613  if(res == VK_SUCCESS)
7614  {
7615  return res;
7616  }
7617 
7618  // Last resort: try dedicated memory.
7619  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7620  {
7621  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7622  }
7623  else
7624  {
7625  res = AllocateDedicatedMemory(
7626  vkMemReq.size,
7627  suballocType,
7628  memTypeIndex,
7629  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7630  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7631  finalCreateInfo.pUserData,
7632  dedicatedBuffer,
7633  dedicatedImage,
7634  pAllocation);
7635  if(res == VK_SUCCESS)
7636  {
7637  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
7638  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7639  return VK_SUCCESS;
7640  }
7641  else
7642  {
7643  // Everything failed: Return error code.
7644  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7645  return res;
7646  }
7647  }
7648  }
7649 }
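/*
An illustrative sketch: the dedicated-memory heuristic above can also be forced from
client code. Assuming `allocator` and a filled `bufCreateInfo` exist, this skips the
block vectors entirely and gives the buffer its own VkDeviceMemory, exactly as if the
request had exceeded half the preferred block size:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);
*/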
7650 
7651 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7652  VkDeviceSize size,
7653  VmaSuballocationType suballocType,
7654  uint32_t memTypeIndex,
7655  bool map,
7656  bool isUserDataString,
7657  void* pUserData,
7658  VkBuffer dedicatedBuffer,
7659  VkImage dedicatedImage,
7660  VmaAllocation* pAllocation)
7661 {
7662  VMA_ASSERT(pAllocation);
7663 
7664  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7665  allocInfo.memoryTypeIndex = memTypeIndex;
7666  allocInfo.allocationSize = size;
7667 
7668  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7669  if(m_UseKhrDedicatedAllocation)
7670  {
7671  if(dedicatedBuffer != VK_NULL_HANDLE)
7672  {
7673  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7674  dedicatedAllocInfo.buffer = dedicatedBuffer;
7675  allocInfo.pNext = &dedicatedAllocInfo;
7676  }
7677  else if(dedicatedImage != VK_NULL_HANDLE)
7678  {
7679  dedicatedAllocInfo.image = dedicatedImage;
7680  allocInfo.pNext = &dedicatedAllocInfo;
7681  }
7682  }
7683 
7684  // Allocate VkDeviceMemory.
7685  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7686  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7687  if(res < 0)
7688  {
7689  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7690  return res;
7691  }
7692 
7693  void* pMappedData = VMA_NULL;
7694  if(map)
7695  {
7696  res = (*m_VulkanFunctions.vkMapMemory)(
7697  m_hDevice,
7698  hMemory,
7699  0,
7700  VK_WHOLE_SIZE,
7701  0,
7702  &pMappedData);
7703  if(res < 0)
7704  {
7705  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7706  FreeVulkanMemory(memTypeIndex, size, hMemory);
7707  return res;
7708  }
7709  }
7710 
7711  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7712  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7713  (*pAllocation)->SetUserData(this, pUserData);
7714 
7715  // Register it in m_pDedicatedAllocations.
7716  {
7717  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7718  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7719  VMA_ASSERT(pDedicatedAllocations);
7720  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7721  }
7722 
7723  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7724 
7725  return VK_SUCCESS;
7726 }
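/*
An illustrative sketch of what the `map` parameter above enables: persistently mapped
allocations via VMA_ALLOCATION_CREATE_MAPPED_BIT. `allocator`, `bufCreateInfo`, `myData`
and `myDataSize` are assumed placeholders:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // guarantees HOST_VISIBLE
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    // pMappedData stays valid for the allocation's whole lifetime; no vmaMapMemory needed.
    memcpy(allocInfo.pMappedData, myData, myDataSize);
*/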
7727 
7728 void VmaAllocator_T::GetBufferMemoryRequirements(
7729  VkBuffer hBuffer,
7730  VkMemoryRequirements& memReq,
7731  bool& requiresDedicatedAllocation,
7732  bool& prefersDedicatedAllocation) const
7733 {
7734  if(m_UseKhrDedicatedAllocation)
7735  {
7736  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7737  memReqInfo.buffer = hBuffer;
7738 
7739  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7740 
7741  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7742  memReq2.pNext = &memDedicatedReq;
7743 
7744  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7745 
7746  memReq = memReq2.memoryRequirements;
7747  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7748  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7749  }
7750  else
7751  {
7752  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7753  requiresDedicatedAllocation = false;
7754  prefersDedicatedAllocation = false;
7755  }
7756 }
7757 
7758 void VmaAllocator_T::GetImageMemoryRequirements(
7759  VkImage hImage,
7760  VkMemoryRequirements& memReq,
7761  bool& requiresDedicatedAllocation,
7762  bool& prefersDedicatedAllocation) const
7763 {
7764  if(m_UseKhrDedicatedAllocation)
7765  {
7766  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7767  memReqInfo.image = hImage;
7768 
7769  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7770 
7771  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7772  memReq2.pNext = &memDedicatedReq;
7773 
7774  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7775 
7776  memReq = memReq2.memoryRequirements;
7777  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7778  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7779  }
7780  else
7781  {
7782  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7783  requiresDedicatedAllocation = false;
7784  prefersDedicatedAllocation = false;
7785  }
7786 }
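/*
An illustrative sketch: the *2KHR query paths above are only taken when the allocator was
created with the KHR dedicated-allocation flag, which in turn assumes the device was
created with VK_KHR_get_memory_requirements2 and VK_KHR_dedicated_allocation enabled
(`physicalDevice` and `device` assumed valid):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.flags = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/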
7787 
7788 VkResult VmaAllocator_T::AllocateMemory(
7789  const VkMemoryRequirements& vkMemReq,
7790  bool requiresDedicatedAllocation,
7791  bool prefersDedicatedAllocation,
7792  VkBuffer dedicatedBuffer,
7793  VkImage dedicatedImage,
7794  const VmaAllocationCreateInfo& createInfo,
7795  VmaSuballocationType suballocType,
7796  VmaAllocation* pAllocation)
7797 {
7798  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7799  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7800  {
7801  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7802  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7803  }
7804  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7805  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7806  {
7807  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7808  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7809  }
7810  if(requiresDedicatedAllocation)
7811  {
7812  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7813  {
7814  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7815  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7816  }
7817  if(createInfo.pool != VK_NULL_HANDLE)
7818  {
7819  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7820  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7821  }
7822  }
7823  if((createInfo.pool != VK_NULL_HANDLE) &&
7824  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7825  {
7826  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7827  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7828  }
7829 
7830  if(createInfo.pool != VK_NULL_HANDLE)
7831  {
7832  return createInfo.pool->m_BlockVector.Allocate(
7833  createInfo.pool,
7834  m_CurrentFrameIndex.load(),
7835  vkMemReq,
7836  createInfo,
7837  suballocType,
7838  pAllocation);
7839  }
7840  else
7841  {
7842  // Bit mask of Vulkan memory types acceptable for this allocation.
7843  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7844  uint32_t memTypeIndex = UINT32_MAX;
7845  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7846  if(res == VK_SUCCESS)
7847  {
7848  res = AllocateMemoryOfType(
7849  vkMemReq,
7850  requiresDedicatedAllocation || prefersDedicatedAllocation,
7851  dedicatedBuffer,
7852  dedicatedImage,
7853  createInfo,
7854  memTypeIndex,
7855  suballocType,
7856  pAllocation);
7857  // Succeeded on first try.
7858  if(res == VK_SUCCESS)
7859  {
7860  return res;
7861  }
7862  // Allocation from this memory type failed. Try other compatible memory types.
7863  else
7864  {
7865  for(;;)
7866  {
7867  // Remove old memTypeIndex from list of possibilities.
7868  memoryTypeBits &= ~(1u << memTypeIndex);
7869  // Find alternative memTypeIndex.
7870  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7871  if(res == VK_SUCCESS)
7872  {
7873  res = AllocateMemoryOfType(
7874  vkMemReq,
7875  requiresDedicatedAllocation || prefersDedicatedAllocation,
7876  dedicatedBuffer,
7877  dedicatedImage,
7878  createInfo,
7879  memTypeIndex,
7880  suballocType,
7881  pAllocation);
7882  // Allocation from this alternative memory type succeeded.
7883  if(res == VK_SUCCESS)
7884  {
7885  return res;
7886  }
7887  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7888  }
7889  // No other matching memory type index could be found.
7890  else
7891  {
7892  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7893  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7894  }
7895  }
7896  }
7897  }
7898  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7899  else
7900  return res;
7901  }
7902 }
7903 
7904 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7905 {
7906  VMA_ASSERT(allocation);
7907 
7908  if(allocation->CanBecomeLost() == false ||
7909  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7910  {
7911  switch(allocation->GetType())
7912  {
7913  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7914  {
7915  VmaBlockVector* pBlockVector = VMA_NULL;
7916  VmaPool hPool = allocation->GetPool();
7917  if(hPool != VK_NULL_HANDLE)
7918  {
7919  pBlockVector = &hPool->m_BlockVector;
7920  }
7921  else
7922  {
7923  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7924  pBlockVector = m_pBlockVectors[memTypeIndex];
7925  }
7926  pBlockVector->Free(allocation);
7927  }
7928  break;
7929  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7930  FreeDedicatedMemory(allocation);
7931  break;
7932  default:
7933  VMA_ASSERT(0);
7934  }
7935  }
7936 
7937  allocation->SetUserData(this, VMA_NULL);
7938  vma_delete(this, allocation);
7939 }
7940 
7941 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7942 {
7943  // Initialize.
7944  InitStatInfo(pStats->total);
7945  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7946  InitStatInfo(pStats->memoryType[i]);
7947  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7948  InitStatInfo(pStats->memoryHeap[i]);
7949 
7950  // Process default pools.
7951  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7952  {
7953  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7954  VMA_ASSERT(pBlockVector);
7955  pBlockVector->AddStats(pStats);
7956  }
7957 
7958  // Process custom pools.
7959  {
7960  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7961  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7962  {
7963  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7964  }
7965  }
7966 
7967  // Process dedicated allocations.
7968  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7969  {
7970  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7971  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7972  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7973  VMA_ASSERT(pDedicatedAllocVector);
7974  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7975  {
7976  VmaStatInfo allocationStatInfo;
7977  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7978  VmaAddStatInfo(pStats->total, allocationStatInfo);
7979  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7980  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7981  }
7982  }
7983 
7984  // Postprocess.
7985  VmaPostprocessCalcStatInfo(pStats->total);
7986  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7987  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7988  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7989  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7990 }
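/*
An illustrative sketch of reading these statistics through the public API (`allocator`
assumed valid, <cstdio> assumed included):

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used %llu B in %u allocations, %llu B unused\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount,
        (unsigned long long)stats.total.unusedBytes);
*/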
7991 
7992 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7993 
7994 VkResult VmaAllocator_T::Defragment(
7995  VmaAllocation* pAllocations,
7996  size_t allocationCount,
7997  VkBool32* pAllocationsChanged,
7998  const VmaDefragmentationInfo* pDefragmentationInfo,
7999  VmaDefragmentationStats* pDefragmentationStats)
8000 {
8001  if(pAllocationsChanged != VMA_NULL)
8002  {
8003  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
8004  }
8005  if(pDefragmentationStats != VMA_NULL)
8006  {
8007  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
8008  }
8009 
8010  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
8011 
8012  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
8013 
8014  const size_t poolCount = m_Pools.size();
8015 
8016  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
8017  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8018  {
8019  VmaAllocation hAlloc = pAllocations[allocIndex];
8020  VMA_ASSERT(hAlloc);
8021  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
8022  // DedicatedAlloc cannot be defragmented.
8023  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
8024  // Only HOST_VISIBLE memory types can be defragmented.
8025  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
8026  // Lost allocation cannot be defragmented.
8027  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
8028  {
8029  VmaBlockVector* pAllocBlockVector = VMA_NULL;
8030 
8031  const VmaPool hAllocPool = hAlloc->GetPool();
8032  // This allocation belongs to a custom pool.
8033  if(hAllocPool != VK_NULL_HANDLE)
8034  {
8035  pAllocBlockVector = &hAllocPool->GetBlockVector();
8036  }
8037  // This allocation belongs to the general pool.
8038  else
8039  {
8040  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
8041  }
8042 
8043  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
8044 
8045  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
8046  &pAllocationsChanged[allocIndex] : VMA_NULL;
8047  pDefragmentator->AddAllocation(hAlloc, pChanged);
8048  }
8049  }
8050 
8051  VkResult result = VK_SUCCESS;
8052 
8053  // ======== Main processing.
8054 
8055  VkDeviceSize maxBytesToMove = SIZE_MAX;
8056  uint32_t maxAllocationsToMove = UINT32_MAX;
8057  if(pDefragmentationInfo != VMA_NULL)
8058  {
8059  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
8060  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
8061  }
8062 
8063  // Process standard memory.
8064  for(uint32_t memTypeIndex = 0;
8065  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
8066  ++memTypeIndex)
8067  {
8068  // Only HOST_VISIBLE memory types can be defragmented.
8069  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8070  {
8071  result = m_pBlockVectors[memTypeIndex]->Defragment(
8072  pDefragmentationStats,
8073  maxBytesToMove,
8074  maxAllocationsToMove);
8075  }
8076  }
8077 
8078  // Process custom pools.
8079  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
8080  {
8081  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
8082  pDefragmentationStats,
8083  maxBytesToMove,
8084  maxAllocationsToMove);
8085  }
8086 
8087  // ======== Destroy defragmentators.
8088 
8089  // Process custom pools.
8090  for(size_t poolIndex = poolCount; poolIndex--; )
8091  {
8092  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
8093  }
8094 
8095  // Process standard memory.
8096  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
8097  {
8098  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8099  {
8100  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
8101  }
8102  }
8103 
8104  return result;
8105 }
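/*
An illustrative sketch of driving this pass from client code. `allocs`/`allocCount` are
assumed arrays of HOST_VISIBLE, non-dedicated allocations that the GPU is not currently
using (a hard requirement, since defragmentation moves memory contents on the CPU):

    std::vector<VkBool32> changed(allocCount); // assumes <vector>
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // VK_WHOLE_SIZE = no byte limit
    defragInfo.maxAllocationsToMove = UINT32_MAX; // no move-count limit
    VmaDefragmentationStats defragStats = {};
    vmaDefragment(allocator, allocs, allocCount, changed.data(), &defragInfo, &defragStats);
    // Wherever changed[i] == VK_TRUE the allocation's offset/memory changed, so buffers
    // must be re-created and re-bound (e.g. with vmaBindBufferMemory) before reuse.
*/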
8106 
8107 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
8108 {
8109  if(hAllocation->CanBecomeLost())
8110  {
8111  /*
8112  Warning: This is a carefully designed algorithm.
8113  Do not modify unless you really know what you're doing :)
8114  */
8115  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8116  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8117  for(;;)
8118  {
8119  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8120  {
8121  pAllocationInfo->memoryType = UINT32_MAX;
8122  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
8123  pAllocationInfo->offset = 0;
8124  pAllocationInfo->size = hAllocation->GetSize();
8125  pAllocationInfo->pMappedData = VMA_NULL;
8126  pAllocationInfo->pUserData = hAllocation->GetUserData();
8127  return;
8128  }
8129  else if(localLastUseFrameIndex == localCurrFrameIndex)
8130  {
8131  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8132  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8133  pAllocationInfo->offset = hAllocation->GetOffset();
8134  pAllocationInfo->size = hAllocation->GetSize();
8135  pAllocationInfo->pMappedData = VMA_NULL;
8136  pAllocationInfo->pUserData = hAllocation->GetUserData();
8137  return;
8138  }
8139  else // Last use time earlier than current time.
8140  {
8141  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8142  {
8143  localLastUseFrameIndex = localCurrFrameIndex;
8144  }
8145  }
8146  }
8147  }
8148  else
8149  {
8150  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8151  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8152  pAllocationInfo->offset = hAllocation->GetOffset();
8153  pAllocationInfo->size = hAllocation->GetSize();
8154  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
8155  pAllocationInfo->pUserData = hAllocation->GetUserData();
8156  }
8157 }
8158 
8159 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
8160 {
8161  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
8162  if(hAllocation->CanBecomeLost())
8163  {
8164  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8165  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8166  for(;;)
8167  {
8168  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8169  {
8170  return false;
8171  }
8172  else if(localLastUseFrameIndex == localCurrFrameIndex)
8173  {
8174  return true;
8175  }
8176  else // Last use time earlier than current time.
8177  {
8178  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8179  {
8180  localLastUseFrameIndex = localCurrFrameIndex;
8181  }
8182  }
8183  }
8184  }
8185  else
8186  {
8187  return true;
8188  }
8189 }
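/*
An illustrative sketch of the per-frame protocol this function belongs to: the application
advances the frame index once per frame, then touches every allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT before recording commands that use it
(`frameIndex` is an assumed frame counter):

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Lost: free the allocation and re-create the resource.
    }
*/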
8190 
8191 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8192 {
8193  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8194 
8195  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8196 
8197  if(newCreateInfo.maxBlockCount == 0)
8198  {
8199  newCreateInfo.maxBlockCount = SIZE_MAX;
8200  }
8201  if(newCreateInfo.blockSize == 0)
8202  {
8203  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8204  }
8205 
8206  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8207 
8208  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8209  if(res != VK_SUCCESS)
8210  {
8211  vma_delete(this, *pPool);
8212  *pPool = VMA_NULL;
8213  return res;
8214  }
8215 
8216  // Add to m_Pools.
8217  {
8218  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8219  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8220  }
8221 
8222  return VK_SUCCESS;
8223 }
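/*
An illustrative sketch of creating a custom pool through the public API; `memTypeIndex`
is assumed to come from vmaFindMemoryTypeIndex(). Zero values map onto the defaults
visible above:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 0;     // 0 -> CalcPreferredBlockSize() is used
    poolCreateInfo.minBlockCount = 1; // pre-allocated by CreateMinBlocks()
    poolCreateInfo.maxBlockCount = 0; // 0 -> unlimited (SIZE_MAX above)

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/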
8224 
8225 void VmaAllocator_T::DestroyPool(VmaPool pool)
8226 {
8227  // Remove from m_Pools.
8228  {
8229  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8230  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8231  VMA_ASSERT(success && "Pool not found in Allocator.");
8232  }
8233 
8234  vma_delete(this, pool);
8235 }
8236 
8237 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8238 {
8239  pool->m_BlockVector.GetPoolStats(pPoolStats);
8240 }
8241 
8242 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8243 {
8244  m_CurrentFrameIndex.store(frameIndex);
8245 }
8246 
8247 void VmaAllocator_T::MakePoolAllocationsLost(
8248  VmaPool hPool,
8249  size_t* pLostAllocationCount)
8250 {
8251  hPool->m_BlockVector.MakePoolAllocationsLost(
8252  m_CurrentFrameIndex.load(),
8253  pLostAllocationCount);
8254 }
8255 
8256 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8257 {
8258  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8259  (*pAllocation)->InitLost();
8260 }
8261 
8262 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8263 {
8264  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8265 
8266  VkResult res;
8267  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8268  {
8269  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8270  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8271  {
8272  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8273  if(res == VK_SUCCESS)
8274  {
8275  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8276  }
8277  }
8278  else
8279  {
8280  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8281  }
8282  }
8283  else
8284  {
8285  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8286  }
8287 
8288  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8289  {
8290  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8291  }
8292 
8293  return res;
8294 }
8295 
8296 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8297 {
8298  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8299  {
8300  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8301  }
8302 
8303  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8304 
8305  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8306  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8307  {
8308  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8309  m_HeapSizeLimit[heapIndex] += size;
8310  }
8311 }
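/*
An illustrative sketch of where m_HeapSizeLimit comes from: the
VmaAllocatorCreateInfo::pHeapSizeLimit array supplied at creation. Capping heap 0 at
1 GiB while leaving the others unlimited (`physicalDevice`/`device` assumed valid):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;        // VK_WHOLE_SIZE = no limit on this heap
    heapLimits[0] = 1024ull * 1024 * 1024;    // 1 GiB budget for heap 0

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/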
8312 
8313 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8314 {
8315  if(hAllocation->CanBecomeLost())
8316  {
8317  return VK_ERROR_MEMORY_MAP_FAILED;
8318  }
8319 
8320  switch(hAllocation->GetType())
8321  {
8322  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8323  {
8324  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8325  char *pBytes = VMA_NULL;
8326  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8327  if(res == VK_SUCCESS)
8328  {
8329  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8330  hAllocation->BlockAllocMap();
8331  }
8332  return res;
8333  }
8334  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8335  return hAllocation->DedicatedAllocMap(this, ppData);
8336  default:
8337  VMA_ASSERT(0);
8338  return VK_ERROR_MEMORY_MAP_FAILED;
8339  }
8340 }
8341 
8342 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8343 {
8344  switch(hAllocation->GetType())
8345  {
8346  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8347  {
8348  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8349  hAllocation->BlockAllocUnmap();
8350  pBlock->Unmap(this, 1);
8351  }
8352  break;
8353  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8354  hAllocation->DedicatedAllocUnmap(this);
8355  break;
8356  default:
8357  VMA_ASSERT(0);
8358  }
8359 }
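/*
An illustrative sketch: because Map()/Unmap() above are reference-counted per block, the
public pair is cheap and safe to call for many allocations sharing one VkDeviceMemory.
`srcData`/`srcSize` are assumed placeholders and the allocation must be HOST_VISIBLE:

    void* pData = nullptr;
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/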
8360 
8361 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
8362 {
8363  VkResult res = VK_SUCCESS;
8364  switch(hAllocation->GetType())
8365  {
8366  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8367  res = GetVulkanFunctions().vkBindBufferMemory(
8368  m_hDevice,
8369  hBuffer,
8370  hAllocation->GetMemory(),
8371  0); //memoryOffset
8372  break;
8373  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8374  {
8375  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8376  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
8377  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
8378  break;
8379  }
8380  default:
8381  VMA_ASSERT(0);
8382  }
8383  return res;
8384 }
8385 
8386 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
8387 {
8388  VkResult res = VK_SUCCESS;
8389  switch(hAllocation->GetType())
8390  {
8391  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8392  res = GetVulkanFunctions().vkBindImageMemory(
8393  m_hDevice,
8394  hImage,
8395  hAllocation->GetMemory(),
8396  0); //memoryOffset
8397  break;
8398  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8399  {
8400  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8401  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
8402  res = pBlock->BindImageMemory(this, hAllocation, hImage);
8403  break;
8404  }
8405  default:
8406  VMA_ASSERT(0);
8407  }
8408  return res;
8409 }
8410 
8411 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8412 {
8413  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8414 
8415  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8416  {
8417  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8418  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8419  VMA_ASSERT(pDedicatedAllocations);
8420  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8421  VMA_ASSERT(success);
8422  }
8423 
8424  VkDeviceMemory hMemory = allocation->GetMemory();
8425 
8426  if(allocation->GetMappedData() != VMA_NULL)
8427  {
8428  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8429  }
8430 
8431  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8432 
8433  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8434 }
8435 
8436 #if VMA_STATS_STRING_ENABLED
8437 
8438 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8439 {
8440  bool dedicatedAllocationsStarted = false;
8441  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8442  {
8443  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8444  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8445  VMA_ASSERT(pDedicatedAllocVector);
8446  if(pDedicatedAllocVector->empty() == false)
8447  {
8448  if(dedicatedAllocationsStarted == false)
8449  {
8450  dedicatedAllocationsStarted = true;
8451  json.WriteString("DedicatedAllocations");
8452  json.BeginObject();
8453  }
8454 
8455  json.BeginString("Type ");
8456  json.ContinueString(memTypeIndex);
8457  json.EndString();
8458 
8459  json.BeginArray();
8460 
8461  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8462  {
8463  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8464  json.BeginObject(true);
8465 
8466  json.WriteString("Type");
8467  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8468 
8469  json.WriteString("Size");
8470  json.WriteNumber(hAlloc->GetSize());
8471 
8472  const void* pUserData = hAlloc->GetUserData();
8473  if(pUserData != VMA_NULL)
8474  {
8475  json.WriteString("UserData");
8476  if(hAlloc->IsUserDataString())
8477  {
8478  json.WriteString((const char*)pUserData);
8479  }
8480  else
8481  {
8482  json.BeginString();
8483  json.ContinueString_Pointer(pUserData);
8484  json.EndString();
8485  }
8486  }
8487 
8488  json.EndObject();
8489  }
8490 
8491  json.EndArray();
8492  }
8493  }
8494  if(dedicatedAllocationsStarted)
8495  {
8496  json.EndObject();
8497  }
8498 
8499  {
8500  bool allocationsStarted = false;
8501  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8502  {
8503  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8504  {
8505  if(allocationsStarted == false)
8506  {
8507  allocationsStarted = true;
8508  json.WriteString("DefaultPools");
8509  json.BeginObject();
8510  }
8511 
8512  json.BeginString("Type ");
8513  json.ContinueString(memTypeIndex);
8514  json.EndString();
8515 
8516  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8517  }
8518  }
8519  if(allocationsStarted)
8520  {
8521  json.EndObject();
8522  }
8523  }
8524 
8525  {
8526  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8527  const size_t poolCount = m_Pools.size();
8528  if(poolCount > 0)
8529  {
8530  json.WriteString("Pools");
8531  json.BeginArray();
8532  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8533  {
8534  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8535  }
8536  json.EndArray();
8537  }
8538  }
8539 }
8540 
8541 #endif // #if VMA_STATS_STRING_ENABLED
8542 
8543 static VkResult AllocateMemoryForImage(
8544  VmaAllocator allocator,
8545  VkImage image,
8546  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8547  VmaSuballocationType suballocType,
8548  VmaAllocation* pAllocation)
8549 {
8550  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8551 
8552  VkMemoryRequirements vkMemReq = {};
8553  bool requiresDedicatedAllocation = false;
8554  bool prefersDedicatedAllocation = false;
8555  allocator->GetImageMemoryRequirements(image, vkMemReq,
8556  requiresDedicatedAllocation, prefersDedicatedAllocation);
8557 
8558  return allocator->AllocateMemory(
8559  vkMemReq,
8560  requiresDedicatedAllocation,
8561  prefersDedicatedAllocation,
8562  VK_NULL_HANDLE, // dedicatedBuffer
8563  image, // dedicatedImage
8564  *pAllocationCreateInfo,
8565  suballocType,
8566  pAllocation);
8567 }
8568 
8569 ////////////////////////////////////////////////////////////////////////////////
8570 // Public interface
8571 
8572 VkResult vmaCreateAllocator(
8573  const VmaAllocatorCreateInfo* pCreateInfo,
8574  VmaAllocator* pAllocator)
8575 {
8576  VMA_ASSERT(pCreateInfo && pAllocator);
8577  VMA_DEBUG_LOG("vmaCreateAllocator");
8578  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8579  return VK_SUCCESS;
8580 }
8581 
8582 void vmaDestroyAllocator(
8583  VmaAllocator allocator)
8584 {
8585  if(allocator != VK_NULL_HANDLE)
8586  {
8587  VMA_DEBUG_LOG("vmaDestroyAllocator");
8588  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8589  vma_delete(&allocationCallbacks, allocator);
8590  }
8591 }
8592 
8593 void vmaGetPhysicalDeviceProperties(
8594  VmaAllocator allocator,
8595  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8596 {
8597  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8598  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8599 }
8600 
8601 void vmaGetMemoryProperties(
8602  VmaAllocator allocator,
8603  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8604 {
8605  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8606  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8607 }
8608 
8609 void vmaGetMemoryTypeProperties(
8610  VmaAllocator allocator,
8611  uint32_t memoryTypeIndex,
8612  VkMemoryPropertyFlags* pFlags)
8613 {
8614  VMA_ASSERT(allocator && pFlags);
8615  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8616  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8617 }
8618 
8619 void vmaSetCurrentFrameIndex(
8620  VmaAllocator allocator,
8621  uint32_t frameIndex)
8622 {
8623  VMA_ASSERT(allocator);
8624  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8625 
8626  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8627 
8628  allocator->SetCurrentFrameIndex(frameIndex);
8629 }
8630 
8631 void vmaCalculateStats(
8632  VmaAllocator allocator,
8633  VmaStats* pStats)
8634 {
8635  VMA_ASSERT(allocator && pStats);
8636  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8637  allocator->CalculateStats(pStats);
8638 }
8639 
8640 #if VMA_STATS_STRING_ENABLED
8641 
8642 void vmaBuildStatsString(
8643  VmaAllocator allocator,
8644  char** ppStatsString,
8645  VkBool32 detailedMap)
8646 {
8647  VMA_ASSERT(allocator && ppStatsString);
8648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8649 
8650  VmaStringBuilder sb(allocator);
8651  {
8652  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8653  json.BeginObject();
8654 
8655  VmaStats stats;
8656  allocator->CalculateStats(&stats);
8657 
8658  json.WriteString("Total");
8659  VmaPrintStatInfo(json, stats.total);
8660 
8661  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8662  {
8663  json.BeginString("Heap ");
8664  json.ContinueString(heapIndex);
8665  json.EndString();
8666  json.BeginObject();
8667 
8668  json.WriteString("Size");
8669  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8670 
8671  json.WriteString("Flags");
8672  json.BeginArray(true);
8673  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8674  {
8675  json.WriteString("DEVICE_LOCAL");
8676  }
8677  json.EndArray();
8678 
8679  if(stats.memoryHeap[heapIndex].blockCount > 0)
8680  {
8681  json.WriteString("Stats");
8682  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8683  }
8684 
8685  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8686  {
8687  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8688  {
8689  json.BeginString("Type ");
8690  json.ContinueString(typeIndex);
8691  json.EndString();
8692 
8693  json.BeginObject();
8694 
8695  json.WriteString("Flags");
8696  json.BeginArray(true);
8697  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8698  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8699  {
8700  json.WriteString("DEVICE_LOCAL");
8701  }
8702  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8703  {
8704  json.WriteString("HOST_VISIBLE");
8705  }
8706  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8707  {
8708  json.WriteString("HOST_COHERENT");
8709  }
8710  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8711  {
8712  json.WriteString("HOST_CACHED");
8713  }
8714  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8715  {
8716  json.WriteString("LAZILY_ALLOCATED");
8717  }
8718  json.EndArray();
8719 
8720  if(stats.memoryType[typeIndex].blockCount > 0)
8721  {
8722  json.WriteString("Stats");
8723  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8724  }
8725 
8726  json.EndObject();
8727  }
8728  }
8729 
8730  json.EndObject();
8731  }
8732  if(detailedMap == VK_TRUE)
8733  {
8734  allocator->PrintDetailedMap(json);
8735  }
8736 
8737  json.EndObject();
8738  }
8739 
8740  const size_t len = sb.GetLength();
8741  char* const pChars = vma_new_array(allocator, char, len + 1);
8742  if(len > 0)
8743  {
8744  memcpy(pChars, sb.GetData(), len);
8745  }
8746  pChars[len] = '\0';
8747  *ppStatsString = pChars;
8748 }
8749 
8750 void vmaFreeStatsString(
8751  VmaAllocator allocator,
8752  char* pStatsString)
8753 {
8754  if(pStatsString != VMA_NULL)
8755  {
8756  VMA_ASSERT(allocator);
8757  size_t len = strlen(pStatsString);
8758  vma_delete_array(allocator, pStatsString, len + 1);
8759  }
8760 }
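/*
An illustrative sketch: the build/free pair belongs together, because the string is
allocated with the allocator's own callbacks and must be returned to the same allocator:

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    puts(statsString); // or write to a .json file for offline inspection
    vmaFreeStatsString(allocator, statsString);
*/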
8761 
8762 #endif // #if VMA_STATS_STRING_ENABLED
8763 
8764 /*
8765 This function is not protected by any mutex because it just reads immutable data.
8766 */
8767 VkResult vmaFindMemoryTypeIndex(
8768  VmaAllocator allocator,
8769  uint32_t memoryTypeBits,
8770  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8771  uint32_t* pMemoryTypeIndex)
8772 {
8773  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8774  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8775  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8776 
8777  if(pAllocationCreateInfo->memoryTypeBits != 0)
8778  {
8779  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8780  }
8781 
8782  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8783  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8784 
8785  // Convert usage to requiredFlags and preferredFlags.
8786  switch(pAllocationCreateInfo->usage)
8787  {
8788  case VMA_MEMORY_USAGE_UNKNOWN:
8789  break;
8790  case VMA_MEMORY_USAGE_GPU_ONLY:
8791  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8792  break;
8793  case VMA_MEMORY_USAGE_CPU_ONLY:
8794  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8795  break;
8796  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8797  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8798  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8799  break;
8800  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8801  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8802  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8803  break;
8804  default:
8805  break;
8806  }
8807 
8808  *pMemoryTypeIndex = UINT32_MAX;
8809  uint32_t minCost = UINT32_MAX;
8810  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8811  memTypeIndex < allocator->GetMemoryTypeCount();
8812  ++memTypeIndex, memTypeBit <<= 1)
8813  {
8814  // This memory type is acceptable according to memoryTypeBits bitmask.
8815  if((memTypeBit & memoryTypeBits) != 0)
8816  {
8817  const VkMemoryPropertyFlags currFlags =
8818  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8819  // This memory type contains requiredFlags.
8820  if((requiredFlags & ~currFlags) == 0)
8821  {
8822  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8823  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8824  // Remember memory type with lowest cost.
8825  if(currCost < minCost)
8826  {
8827  *pMemoryTypeIndex = memTypeIndex;
8828  if(currCost == 0)
8829  {
8830  return VK_SUCCESS;
8831  }
8832  minCost = currCost;
8833  }
8834  }
8835  }
8836  }
8837  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8838 }
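/*
An illustrative sketch of the cost-based search above: requiredFlags must all be present
in the chosen type, while missing preferredFlags only add cost. Passing ~0u as
memoryTypeBits accepts every memory type the implementation offers:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // adds HOST_VISIBLE | HOST_COHERENT, as above

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndex(allocator, ~0u, &allocCreateInfo, &memTypeIndex);
*/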
8839 
8840 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8841  VmaAllocator allocator,
8842  const VkBufferCreateInfo* pBufferCreateInfo,
8843  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8844  uint32_t* pMemoryTypeIndex)
8845 {
8846  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8847  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8848  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8849  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8850 
8851  const VkDevice hDev = allocator->m_hDevice;
8852  VkBuffer hBuffer = VK_NULL_HANDLE;
8853  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8854  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8855  if(res == VK_SUCCESS)
8856  {
8857  VkMemoryRequirements memReq = {};
8858  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8859  hDev, hBuffer, &memReq);
8860 
8861  res = vmaFindMemoryTypeIndex(
8862  allocator,
8863  memReq.memoryTypeBits,
8864  pAllocationCreateInfo,
8865  pMemoryTypeIndex);
8866 
8867  allocator->GetVulkanFunctions().vkDestroyBuffer(
8868  hDev, hBuffer, allocator->GetAllocationCallbacks());
8869  }
8870  return res;
8871 }
8872 
8873 VkResult vmaFindMemoryTypeIndexForImageInfo(
8874  VmaAllocator allocator,
8875  const VkImageCreateInfo* pImageCreateInfo,
8876  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8877  uint32_t* pMemoryTypeIndex)
8878 {
8879  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8880  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8881  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8882  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8883 
8884  const VkDevice hDev = allocator->m_hDevice;
8885  VkImage hImage = VK_NULL_HANDLE;
8886  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8887  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8888  if(res == VK_SUCCESS)
8889  {
8890  VkMemoryRequirements memReq = {};
8891  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8892  hDev, hImage, &memReq);
8893 
8894  res = vmaFindMemoryTypeIndex(
8895  allocator,
8896  memReq.memoryTypeBits,
8897  pAllocationCreateInfo,
8898  pMemoryTypeIndex);
8899 
8900  allocator->GetVulkanFunctions().vkDestroyImage(
8901  hDev, hImage, allocator->GetAllocationCallbacks());
8902  }
8903  return res;
8904 }
8905 
8906 VkResult vmaCreatePool(
8907  VmaAllocator allocator,
8908  const VmaPoolCreateInfo* pCreateInfo,
8909  VmaPool* pPool)
8910 {
8911  VMA_ASSERT(allocator && pCreateInfo && pPool);
8912 
8913  VMA_DEBUG_LOG("vmaCreatePool");
8914 
8915  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8916 
8917  return allocator->CreatePool(pCreateInfo, pPool);
8918 }
8919 
8920 void vmaDestroyPool(
8921  VmaAllocator allocator,
8922  VmaPool pool)
8923 {
8924  VMA_ASSERT(allocator);
8925 
8926  if(pool == VK_NULL_HANDLE)
8927  {
8928  return;
8929  }
8930 
8931  VMA_DEBUG_LOG("vmaDestroyPool");
8932 
8933  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8934 
8935  allocator->DestroyPool(pool);
8936 }
8937 
8938 void vmaGetPoolStats(
8939  VmaAllocator allocator,
8940  VmaPool pool,
8941  VmaPoolStats* pPoolStats)
8942 {
8943  VMA_ASSERT(allocator && pool && pPoolStats);
8944 
8945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8946 
8947  allocator->GetPoolStats(pool, pPoolStats);
8948 }
8949 
8950 void vmaMakePoolAllocationsLost(
8951  VmaAllocator allocator,
8952  VmaPool pool,
8953  size_t* pLostAllocationCount)
8954 {
8955  VMA_ASSERT(allocator && pool);
8956 
8957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8958 
8959  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8960 }
8961 
8962 VkResult vmaAllocateMemory(
8963  VmaAllocator allocator,
8964  const VkMemoryRequirements* pVkMemoryRequirements,
8965  const VmaAllocationCreateInfo* pCreateInfo,
8966  VmaAllocation* pAllocation,
8967  VmaAllocationInfo* pAllocationInfo)
8968 {
8969  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8970 
8971  VMA_DEBUG_LOG("vmaAllocateMemory");
8972 
8973  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8974 
8975  VkResult result = allocator->AllocateMemory(
8976  *pVkMemoryRequirements,
8977  false, // requiresDedicatedAllocation
8978  false, // prefersDedicatedAllocation
8979  VK_NULL_HANDLE, // dedicatedBuffer
8980  VK_NULL_HANDLE, // dedicatedImage
8981  *pCreateInfo,
8982  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8983  pAllocation);
8984 
8985  if(pAllocationInfo && result == VK_SUCCESS)
8986  {
8987  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8988  }
8989 
8990  return result;
8991 }
8992 
8993 VkResult vmaAllocateMemoryForBuffer(
8994  VmaAllocator allocator,
8995  VkBuffer buffer,
8996  const VmaAllocationCreateInfo* pCreateInfo,
8997  VmaAllocation* pAllocation,
8998  VmaAllocationInfo* pAllocationInfo)
8999 {
9000  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9001 
9002  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
9003 
9004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9005 
9006  VkMemoryRequirements vkMemReq = {};
9007  bool requiresDedicatedAllocation = false;
9008  bool prefersDedicatedAllocation = false;
9009  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
9010  requiresDedicatedAllocation,
9011  prefersDedicatedAllocation);
9012 
9013  VkResult result = allocator->AllocateMemory(
9014  vkMemReq,
9015  requiresDedicatedAllocation,
9016  prefersDedicatedAllocation,
9017  buffer, // dedicatedBuffer
9018  VK_NULL_HANDLE, // dedicatedImage
9019  *pCreateInfo,
9020  VMA_SUBALLOCATION_TYPE_BUFFER,
9021  pAllocation);
9022 
9023  if(pAllocationInfo && result == VK_SUCCESS)
9024  {
9025  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9026  }
9027 
9028  return result;
9029 }
9030 
9031 VkResult vmaAllocateMemoryForImage(
9032  VmaAllocator allocator,
9033  VkImage image,
9034  const VmaAllocationCreateInfo* pCreateInfo,
9035  VmaAllocation* pAllocation,
9036  VmaAllocationInfo* pAllocationInfo)
9037 {
9038  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9039 
9040  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
9041 
9042  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9043 
9044  VkResult result = AllocateMemoryForImage(
9045  allocator,
9046  image,
9047  pCreateInfo,
9048  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
9049  pAllocation);
9050 
9051  if(pAllocationInfo && result == VK_SUCCESS)
9052  {
9053  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9054  }
9055 
9056  return result;
9057 }
9058 
9059 void vmaFreeMemory(
9060  VmaAllocator allocator,
9061  VmaAllocation allocation)
9062 {
9063  VMA_ASSERT(allocator && allocation);
9064 
9065  VMA_DEBUG_LOG("vmaFreeMemory");
9066 
9067  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9068 
9069  allocator->FreeMemory(allocation);
9070 }
9071 
9072 void vmaGetAllocationInfo(
9073  VmaAllocator allocator,
9074  VmaAllocation allocation,
9075  VmaAllocationInfo* pAllocationInfo)
9076 {
9077  VMA_ASSERT(allocator && allocation && pAllocationInfo);
9078 
9079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9080 
9081  allocator->GetAllocationInfo(allocation, pAllocationInfo);
9082 }
9083 
9084 VkBool32 vmaTouchAllocation(
9085  VmaAllocator allocator,
9086  VmaAllocation allocation)
9087 {
9088  VMA_ASSERT(allocator && allocation);
9089 
9090  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9091 
9092  return allocator->TouchAllocation(allocation);
9093 }
9094 
9095 void vmaSetAllocationUserData(
9096  VmaAllocator allocator,
9097  VmaAllocation allocation,
9098  void* pUserData)
9099 {
9100  VMA_ASSERT(allocator && allocation);
9101 
9102  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9103 
9104  allocation->SetUserData(allocator, pUserData);
9105 }
9106 
9107 void vmaCreateLostAllocation(
9108  VmaAllocator allocator,
9109  VmaAllocation* pAllocation)
9110 {
9111  VMA_ASSERT(allocator && pAllocation);
9112 
9113  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
9114 
9115  allocator->CreateLostAllocation(pAllocation);
9116 }
9117 
9118 VkResult vmaMapMemory(
9119  VmaAllocator allocator,
9120  VmaAllocation allocation,
9121  void** ppData)
9122 {
9123  VMA_ASSERT(allocator && allocation && ppData);
9124 
9125  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9126 
9127  return allocator->Map(allocation, ppData);
9128 }
9129 
9130 void vmaUnmapMemory(
9131  VmaAllocator allocator,
9132  VmaAllocation allocation)
9133 {
9134  VMA_ASSERT(allocator && allocation);
9135 
9136  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9137 
9138  allocator->Unmap(allocation);
9139 }
9140 
9141 VkResult vmaDefragment(
9142  VmaAllocator allocator,
9143  VmaAllocation* pAllocations,
9144  size_t allocationCount,
9145  VkBool32* pAllocationsChanged,
9146  const VmaDefragmentationInfo *pDefragmentationInfo,
9147  VmaDefragmentationStats* pDefragmentationStats)
9148 {
9149  VMA_ASSERT(allocator && pAllocations);
9150 
9151  VMA_DEBUG_LOG("vmaDefragment");
9152 
9153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9154 
9155  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
9156 }
9157 
9158 VkResult vmaBindBufferMemory(
9159  VmaAllocator allocator,
9160  VmaAllocation allocation,
9161  VkBuffer buffer)
9162 {
9163  VMA_ASSERT(allocator && allocation && buffer);
9164 
9165  VMA_DEBUG_LOG("vmaBindBufferMemory");
9166 
9167  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9168 
9169  return allocator->BindBufferMemory(allocation, buffer);
9170 }
9171 
9172 VkResult vmaBindImageMemory(
9173  VmaAllocator allocator,
9174  VmaAllocation allocation,
9175  VkImage image)
9176 {
9177  VMA_ASSERT(allocator && allocation && image);
9178 
9179  VMA_DEBUG_LOG("vmaBindImageMemory");
9180 
9181  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9182 
9183  return allocator->BindImageMemory(allocation, image);
9184 }
9185 
9186 VkResult vmaCreateBuffer(
9187  VmaAllocator allocator,
9188  const VkBufferCreateInfo* pBufferCreateInfo,
9189  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9190  VkBuffer* pBuffer,
9191  VmaAllocation* pAllocation,
9192  VmaAllocationInfo* pAllocationInfo)
9193 {
9194  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
9195 
9196  VMA_DEBUG_LOG("vmaCreateBuffer");
9197 
9198  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9199 
9200  *pBuffer = VK_NULL_HANDLE;
9201  *pAllocation = VK_NULL_HANDLE;
9202 
9203  // 1. Create VkBuffer.
9204  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
9205  allocator->m_hDevice,
9206  pBufferCreateInfo,
9207  allocator->GetAllocationCallbacks(),
9208  pBuffer);
9209  if(res >= 0)
9210  {
9211  // 2. vkGetBufferMemoryRequirements.
9212  VkMemoryRequirements vkMemReq = {};
9213  bool requiresDedicatedAllocation = false;
9214  bool prefersDedicatedAllocation = false;
9215  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
9216  requiresDedicatedAllocation, prefersDedicatedAllocation);
9217 
9218  // Make sure alignment requirements for specific buffer usages reported
9219  // in Physical Device Properties are included in alignment reported by memory requirements.
9220  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
9221  {
9222  VMA_ASSERT(vkMemReq.alignment %
9223  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
9224  }
9225  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
9226  {
9227  VMA_ASSERT(vkMemReq.alignment %
9228  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
9229  }
9230  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
9231  {
9232  VMA_ASSERT(vkMemReq.alignment %
9233  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
9234  }
9235 
9236  // 3. Allocate memory using allocator.
9237  res = allocator->AllocateMemory(
9238  vkMemReq,
9239  requiresDedicatedAllocation,
9240  prefersDedicatedAllocation,
9241  *pBuffer, // dedicatedBuffer
9242  VK_NULL_HANDLE, // dedicatedImage
9243  *pAllocationCreateInfo,
9244  VMA_SUBALLOCATION_TYPE_BUFFER,
9245  pAllocation);
9246  if(res >= 0)
9247  {
9248  // 4. Bind buffer with memory.
9249  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
9250  if(res >= 0)
9251  {
9252  // All steps succeeded.
9253  if(pAllocationInfo != VMA_NULL)
9254  {
9255  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9256  }
9257  return VK_SUCCESS;
9258  }
9259  allocator->FreeMemory(*pAllocation);
9260  *pAllocation = VK_NULL_HANDLE;
9261  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9262  *pBuffer = VK_NULL_HANDLE;
9263  return res;
9264  }
9265  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9266  *pBuffer = VK_NULL_HANDLE;
9267  return res;
9268  }
9269  return res;
9270 }
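/*
An illustrative sketch of the whole create-allocate-bind sequence above from the client's
point of view (size and usage flags are arbitrary assumptions):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr) == VK_SUCCESS)
    {
        // ... use buf ...
        vmaDestroyBuffer(allocator, buf, alloc); // destroys the buffer and frees its memory in one call
    }
*/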
9271 
9272 void vmaDestroyBuffer(
9273  VmaAllocator allocator,
9274  VkBuffer buffer,
9275  VmaAllocation allocation)
9276 {
9277  if(buffer != VK_NULL_HANDLE)
9278  {
9279  VMA_ASSERT(allocator);
9280 
9281  VMA_DEBUG_LOG("vmaDestroyBuffer");
9282 
9283  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9284 
9285  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
9286 
9287  allocator->FreeMemory(allocation);
9288  }
9289 }
9290 
9291 VkResult vmaCreateImage(
9292  VmaAllocator allocator,
9293  const VkImageCreateInfo* pImageCreateInfo,
9294  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9295  VkImage* pImage,
9296  VmaAllocation* pAllocation,
9297  VmaAllocationInfo* pAllocationInfo)
9298 {
9299  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
9300 
9301  VMA_DEBUG_LOG("vmaCreateImage");
9302 
9303  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9304 
9305  *pImage = VK_NULL_HANDLE;
9306  *pAllocation = VK_NULL_HANDLE;
9307 
9308  // 1. Create VkImage.
9309  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9310  allocator->m_hDevice,
9311  pImageCreateInfo,
9312  allocator->GetAllocationCallbacks(),
9313  pImage);
9314  if(res >= 0)
9315  {
9316  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9317  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9318  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9319 
9320  // 2. Allocate memory using allocator.
9321  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9322  if(res >= 0)
9323  {
9324  // 3. Bind image with memory.
9325  res = allocator->BindImageMemory(*pAllocation, *pImage);
9326  if(res >= 0)
9327  {
9328  // All steps succeeded.
9329  if(pAllocationInfo != VMA_NULL)
9330  {
9331  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9332  }
9333  return VK_SUCCESS;
9334  }
9335  allocator->FreeMemory(*pAllocation); // binding failed: roll back the allocation
9336  *pAllocation = VK_NULL_HANDLE;
9337  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9338  *pImage = VK_NULL_HANDLE;
9339  return res;
9340  }
9341  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9342  *pImage = VK_NULL_HANDLE;
9343  return res;
9344  }
9345  return res;
9346 }
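
A corresponding usage sketch for the image path implemented above, again assuming an initialized allocator; the extent, format, and usage flags are only illustrative. Note how VK_IMAGE_TILING_OPTIMAL maps to VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL in the code above:

// Sketch: a 2D sampled image in device-local memory.
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = { 0 };
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage img = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
if(vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, NULL) == VK_SUCCESS)
{
    // ... record commands using img ...
    vmaDestroyImage(allocator, img, alloc); // destroys the image and frees its memory
}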
9347 
9348 void vmaDestroyImage(
9349  VmaAllocator allocator,
9350  VkImage image,
9351  VmaAllocation allocation)
9352 {
9353  if(image != VK_NULL_HANDLE)
9354  {
9355  VMA_ASSERT(allocator);
9356 
9357  VMA_DEBUG_LOG("vmaDestroyImage");
9358 
9359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9360 
9361  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9362 
9363  allocator->FreeMemory(allocation);
9364  }
9365 }
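
Both create functions above fill a VmaAllocationInfo only when the caller passes a non-null pAllocationInfo; the same data can also be fetched later with vmaGetAllocationInfo. A short sketch, assuming alloc came from one of the calls above:

// Sketch: query where an existing allocation ended up.
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, alloc, &allocInfo);
// (allocInfo.deviceMemory, allocInfo.offset) locate the sub-range inside the
// VkDeviceMemory block; allocInfo.size is the allocation size in bytes.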
9366 
9367 #endif // #ifdef VMA_IMPLEMENTATION