Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

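/*
Example (illustrative sketch, not part of the original header): installing
device memory callbacks to log every VkDeviceMemory block that VMA allocates
or frees. The callback function names are hypothetical.

    void VKAPI_PTR MyAllocateCallback(VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated block: type %u, size %llu\n", memoryType, (unsigned long long)size);
    }
    void VKAPI_PTR MyFreeCallback(VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Freed block: type %u, size %llu\n", memoryType, (unsigned long long)size);
    }

    VmaDeviceMemoryCallbacks memoryCallbacks = {};
    memoryCallbacks.pfnAllocate = MyAllocateCallback;
    memoryCallbacks.pfnFree = MyFreeCallback;
    // Assign to VmaAllocatorCreateInfo::pDeviceMemoryCallbacks before vmaCreateAllocator().
*/
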
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

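/*
Example (illustrative sketch): filling VmaVulkanFunctions from statically
linked Vulkan prototypes. Only needed when VMA_STATIC_VULKAN_FUNCTIONS is 0;
otherwise the library fetches these pointers itself.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = &vkFreeMemory;
    // ... and so on for every member, then:
    // allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
*/
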
typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* pHeapSizeLimit;

    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

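/*
Example (illustrative sketch): minimal allocator lifetime. Assumes
`physicalDevice` and `device` were created by the application beforehand.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    if(res == VK_SUCCESS)
    {
        // ... use the allocator ...
        vmaDestroyAllocator(allocator);
    }
*/
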
typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

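/*
Example (illustrative sketch): dumping allocator statistics. vmaBuildStatsString
returns a string that must be released with vmaFreeStatsString.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Total used bytes: %llu\n", (unsigned long long)stats.total.usedBytes);

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write statsString to a file or log ...
    vmaFreeStatsString(allocator, statsString);
*/
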
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

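/*
Example (illustrative sketch): querying a memory type index for CPU-side
staging memory before creating a custom pool.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator,
        UINT32_MAX, // Accept any memory type bit; a real VkMemoryRequirements value is typical.
        &allocCreateInfo,
        &memTypeIndex);
*/
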
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

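/*
Example (illustrative sketch): creating a custom pool for a specific memory
type. `memTypeIndex` would come from vmaFindMemoryTypeIndex as shown above.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block.
    poolInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool, then:
    vmaDestroyPool(allocator, pool);
*/
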
VK_DEFINE_HANDLE(VmaAllocation)


typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

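/*
Example (illustrative sketch): uploading data through a mapped pointer.
Assumes `allocation` is host-visible; the flush is unnecessary on
HOST_COHERENT memory but makes the code correct everywhere.

    void* pData;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    }
*/
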
typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

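/*
Example (illustrative sketch): defragmenting a set of allocations. Buffers or
images bound to moved allocations must be destroyed before and recreated
afterwards, which this fragment omits. ALLOC_COUNT is hypothetical.

    VkBool32 allocationsChanged[ALLOC_COUNT];
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations, ALLOC_COUNT,
        allocationsChanged,
        VMA_NULL, // Default VmaDefragmentationInfo (no limits).
        &defragStats);
*/
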
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

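/*
Example (illustrative sketch): the typical one-call path that creates a buffer
and its backing memory together.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, VMA_NULL);
    // ... later:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
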
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

2281 
2282 #ifndef VMA_NULL
2283  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2284  #define VMA_NULL nullptr
2285 #endif
2286 
2287 #if defined(__APPLE__) || defined(__ANDROID__)
2288 #include <cstdlib>
2289 void *aligned_alloc(size_t alignment, size_t size)
2290 {
2291  // alignment must be >= sizeof(void*)
2292  if(alignment < sizeof(void*))
2293  {
2294  alignment = sizeof(void*);
2295  }
2296 
2297  void *pointer;
2298  if(posix_memalign(&pointer, alignment, size) == 0)
2299  return pointer;
2300  return VMA_NULL;
2301 }
2302 #endif
2303 
2304 // If your compiler is not compatible with C++11 and definition of
2305 // aligned_alloc() function is missing, uncommeting following line may help:
2306 
2307 //#include <malloc.h>
2308 
// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

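/*
Worked examples (illustrative, not part of the original header) for the
helpers above:

    VmaCountBitsSet(0x2C) == 3          // 0x2C = 0b101100 has three set bits.
    VmaAlignUp<uint32_t>(11, 8) == 16
    VmaAlignDown<uint32_t>(11, 8) == 8
    VmaRoundDiv<uint32_t>(7, 2) == 4    // 3.5 rounds to 4.
*/
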
#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
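
/*
Worked example (illustrative): with pageSize = 1024, a resource at offset 0 of
size 1000 ends on page 0 (last byte 999), and a resource at offset 1000 also
starts on page 0, so VmaBlocksOnSamePage(0, 1000, 1000, 1024) returns true.
Moving the second resource to offset 1024 puts its start on page 1, and the
function returns false.
*/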

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

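/*
Usage sketch (illustrative): lower-bound search over a sorted array.

    const int values[] = { 1, 3, 3, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        values, values + 4, 3,
        [](int lhs, int rhs) { return lhs < rhs; });
    // it points to the first 3 (index 1); searching for 4 would yield index 3.
*/
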
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

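/*
Usage sketch (illustrative): VmaPoolAllocator recycles fixed-size objects from
pooled blocks, so Alloc()/Free() avoid a heap round-trip per object. With null
allocation callbacks, VmaMalloc falls back to the system aligned allocator.

    VmaPoolAllocator<int> intAllocator(VMA_NULL, 16); // 16 items per block.
    int* p = intAllocator.Alloc(); // Note: no constructor is run; fine for PODs.
    *p = 42;
    intAllocator.Free(p);
*/
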
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

3790 
3791 class VmaDeviceMemoryBlock;
3792 
3793 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
3794 
3795 struct VmaAllocation_T
3796 {
3797  VMA_CLASS_NO_COPY(VmaAllocation_T)
3798 private:
3799  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3800 
3801  enum FLAGS
3802  {
3803  FLAG_USER_DATA_STRING = 0x01,
3804  };
3805 
3806 public:
3807  enum ALLOCATION_TYPE
3808  {
3809  ALLOCATION_TYPE_NONE,
3810  ALLOCATION_TYPE_BLOCK,
3811  ALLOCATION_TYPE_DEDICATED,
3812  };
3813 
3814  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3815  m_Alignment(1),
3816  m_Size(0),
3817  m_pUserData(VMA_NULL),
3818  m_LastUseFrameIndex(currentFrameIndex),
3819  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3820  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3821  m_MapCount(0),
3822  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3823  {
3824 #if VMA_STATS_STRING_ENABLED
3825  m_CreationFrameIndex = currentFrameIndex;
3826  m_BufferImageUsage = 0;
3827 #endif
3828  }
3829 
3830  ~VmaAllocation_T()
3831  {
3832  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3833 
3834  // Check if owned string was freed.
3835  VMA_ASSERT(m_pUserData == VMA_NULL);
3836  }
3837 
3838  void InitBlockAllocation(
3839  VmaPool hPool,
3840  VmaDeviceMemoryBlock* block,
3841  VkDeviceSize offset,
3842  VkDeviceSize alignment,
3843  VkDeviceSize size,
3844  VmaSuballocationType suballocationType,
3845  bool mapped,
3846  bool canBecomeLost)
3847  {
3848  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3849  VMA_ASSERT(block != VMA_NULL);
3850  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3851  m_Alignment = alignment;
3852  m_Size = size;
3853  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3854  m_SuballocationType = (uint8_t)suballocationType;
3855  m_BlockAllocation.m_hPool = hPool;
3856  m_BlockAllocation.m_Block = block;
3857  m_BlockAllocation.m_Offset = offset;
3858  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3859  }
3860 
3861  void InitLost()
3862  {
3863  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3864  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3865  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3866  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3867  m_BlockAllocation.m_Block = VMA_NULL;
3868  m_BlockAllocation.m_Offset = 0;
3869  m_BlockAllocation.m_CanBecomeLost = true;
3870  }
3871 
3872  void ChangeBlockAllocation(
3873  VmaAllocator hAllocator,
3874  VmaDeviceMemoryBlock* block,
3875  VkDeviceSize offset);
3876 
3877  // pMappedData not null means allocation is created with MAPPED flag.
3878  void InitDedicatedAllocation(
3879  uint32_t memoryTypeIndex,
3880  VkDeviceMemory hMemory,
3881  VmaSuballocationType suballocationType,
3882  void* pMappedData,
3883  VkDeviceSize size)
3884  {
3885  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3886  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3887  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3888  m_Alignment = 0;
3889  m_Size = size;
3890  m_SuballocationType = (uint8_t)suballocationType;
3891  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3892  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3893  m_DedicatedAllocation.m_hMemory = hMemory;
3894  m_DedicatedAllocation.m_pMappedData = pMappedData;
3895  }
3896 
3897  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3898  VkDeviceSize GetAlignment() const { return m_Alignment; }
3899  VkDeviceSize GetSize() const { return m_Size; }
3900  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3901  void* GetUserData() const { return m_pUserData; }
3902  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3903  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3904 
3905  VmaDeviceMemoryBlock* GetBlock() const
3906  {
3907  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3908  return m_BlockAllocation.m_Block;
3909  }
3910  VkDeviceSize GetOffset() const;
3911  VkDeviceMemory GetMemory() const;
3912  uint32_t GetMemoryTypeIndex() const;
3913  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3914  void* GetMappedData() const;
3915  bool CanBecomeLost() const;
3916  VmaPool GetPool() const;
3917 
3918  uint32_t GetLastUseFrameIndex() const
3919  {
3920  return m_LastUseFrameIndex.load();
3921  }
3922  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3923  {
3924  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3925  }
3926  /*
3927  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3928  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3929  - Else, returns false.
3930 
3931  If hAllocation is already lost, assert - you should not call it then.
3932  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3933  */
3934  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
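 // Worked example of the rule above, with numbers assumed purely for
 // illustration: with frameInUseCount = 2 and currentFrameIndex = 10, an
 // allocation whose LastUseFrameIndex is 7 satisfies 7 + 2 < 10, so
 // MakeLost() sets it to VMA_FRAME_INDEX_LOST and returns true; one last
 // used in frame 8 gives 8 + 2 >= 10 and is kept, since a queued frame may
 // still reference it.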
3935 
3936  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3937  {
3938  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3939  outInfo.blockCount = 1;
3940  outInfo.allocationCount = 1;
3941  outInfo.unusedRangeCount = 0;
3942  outInfo.usedBytes = m_Size;
3943  outInfo.unusedBytes = 0;
3944  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3945  outInfo.unusedRangeSizeMin = UINT64_MAX;
3946  outInfo.unusedRangeSizeMax = 0;
3947  }
3948 
3949  void BlockAllocMap();
3950  void BlockAllocUnmap();
3951  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3952  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3953 
3954 #if VMA_STATS_STRING_ENABLED
3955  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
3956  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
3957 
3958  void InitBufferImageUsage(uint32_t bufferImageUsage)
3959  {
3960  VMA_ASSERT(m_BufferImageUsage == 0);
3961  m_BufferImageUsage = bufferImageUsage;
3962  }
3963 
3964  void PrintParameters(class VmaJsonWriter& json) const;
3965 #endif
3966 
3967 private:
3968  VkDeviceSize m_Alignment;
3969  VkDeviceSize m_Size;
3970  void* m_pUserData;
3971  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3972  uint8_t m_Type; // ALLOCATION_TYPE
3973  uint8_t m_SuballocationType; // VmaSuballocationType
3974  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3975  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3976  uint8_t m_MapCount;
3977  uint8_t m_Flags; // enum FLAGS
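 // Illustrative sketch of the m_MapCount layout above (example values
 // assumed): a persistently mapped allocation that was additionally mapped
 // twice via vmaMapMemory() stores m_MapCount == (0x80 | 2) == 0x82; the
 // reference count is recovered as
 // (m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 2 and the persistent
 // flag as (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0.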
3978 
3979  // Allocation out of VmaDeviceMemoryBlock.
3980  struct BlockAllocation
3981  {
3982  VmaPool m_hPool; // Null if belongs to general memory.
3983  VmaDeviceMemoryBlock* m_Block;
3984  VkDeviceSize m_Offset;
3985  bool m_CanBecomeLost;
3986  };
3987 
3988  // Allocation for an object that has its own private VkDeviceMemory.
3989  struct DedicatedAllocation
3990  {
3991  uint32_t m_MemoryTypeIndex;
3992  VkDeviceMemory m_hMemory;
3993  void* m_pMappedData; // Not null means memory is mapped.
3994  };
3995 
3996  union
3997  {
3998  // Allocation out of VmaDeviceMemoryBlock.
3999  BlockAllocation m_BlockAllocation;
4000  // Allocation for an object that has its own private VkDeviceMemory.
4001  DedicatedAllocation m_DedicatedAllocation;
4002  };
4003 
4004 #if VMA_STATS_STRING_ENABLED
4005  uint32_t m_CreationFrameIndex;
4006  uint32_t m_BufferImageUsage; // 0 if unknown.
4007 #endif
4008 
4009  void FreeUserDataString(VmaAllocator hAllocator);
4010 };
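// VmaAllocation_T is a hand-rolled tagged union: m_Type selects which member
// of the anonymous union is alive, and the accessors assert the tag before
// touching it. A hypothetical caller therefore checks GetType() first, e.g.:
//
//   if(alloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
//   {
//       VmaDeviceMemoryBlock* const pBlock = alloc->GetBlock(); // asserts ALLOCATION_TYPE_BLOCK
//   }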
4011 
4012 /*
4013 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
4014 allocation and returned as an allocated memory block, or free.
4015 */
4016 struct VmaSuballocation
4017 {
4018  VkDeviceSize offset;
4019  VkDeviceSize size;
4020  VmaAllocation hAllocation;
4021  VmaSuballocationType type;
4022 };
4023 
4024 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4025 
4026 // Cost of making one additional allocation lost, expressed in bytes.
4027 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4028 
4029 /*
4030 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4031 
4032 If canMakeOtherLost was false:
4033 - item points to a FREE suballocation.
4034 - itemsToMakeLostCount is 0.
4035 
4036 If canMakeOtherLost was true:
4037 - item points to first of sequence of suballocations, which are either FREE,
4038  or point to VmaAllocations that can become lost.
4039 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4040  the requested allocation to succeed.
4041 */
4042 struct VmaAllocationRequest
4043 {
4044  VkDeviceSize offset;
4045  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4046  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4047  VmaSuballocationList::iterator item;
4048  size_t itemsToMakeLostCount;
4049 
4050  VkDeviceSize CalcCost() const
4051  {
4052  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4053  }
4054 };
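// A worked example of CalcCost() with assumed numbers: a candidate placement
// that overlaps 3 MiB of allocations to be made lost (sumItemSize == 3145728)
// across 2 such allocations costs
// 3145728 + 2 * VMA_LOST_ALLOCATION_COST == 5242880 "bytes", so
// CreateAllocationRequest() below prefers any alternative placement with a
// lower cost, e.g. one losing a single 1 MiB allocation (cost 2097152).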
4055 
4056 /*
4057 Data structure used for bookkeeping of allocations and unused ranges of memory
4058 in a single VkDeviceMemory block.
4059 */
4060 class VmaBlockMetadata
4061 {
4062  VMA_CLASS_NO_COPY(VmaBlockMetadata)
4063 public:
4064  VmaBlockMetadata(VmaAllocator hAllocator);
4065  ~VmaBlockMetadata();
4066  void Init(VkDeviceSize size);
4067 
4068  // Validates all data structures inside this object. If not valid, returns false.
4069  bool Validate() const;
4070  VkDeviceSize GetSize() const { return m_Size; }
4071  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4072  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4073  VkDeviceSize GetUnusedRangeSizeMax() const;
4074  // Returns true if this block is empty - contains only a single free suballocation.
4075  bool IsEmpty() const;
4076 
4077  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4078  void AddPoolStats(VmaPoolStats& inoutStats) const;
4079 
4080 #if VMA_STATS_STRING_ENABLED
4081  void PrintDetailedMap(class VmaJsonWriter& json) const;
4082 #endif
4083 
4084  // Creates a trivial request for the case when the block is empty.
4085  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
4086 
4087  // Tries to find a place for suballocation with given parameters inside this block.
4088  // If succeeded, fills pAllocationRequest and returns true.
4089  // If failed, returns false.
4090  bool CreateAllocationRequest(
4091  uint32_t currentFrameIndex,
4092  uint32_t frameInUseCount,
4093  VkDeviceSize bufferImageGranularity,
4094  VkDeviceSize allocSize,
4095  VkDeviceSize allocAlignment,
4096  VmaSuballocationType allocType,
4097  bool canMakeOtherLost,
4098  VmaAllocationRequest* pAllocationRequest);
4099 
4100  bool MakeRequestedAllocationsLost(
4101  uint32_t currentFrameIndex,
4102  uint32_t frameInUseCount,
4103  VmaAllocationRequest* pAllocationRequest);
4104 
4105  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4106 
4107  // Makes actual allocation based on request. Request must already be checked and valid.
4108  void Alloc(
4109  const VmaAllocationRequest& request,
4110  VmaSuballocationType type,
4111  VkDeviceSize allocSize,
4112  VmaAllocation hAllocation);
4113 
4114  // Frees suballocation assigned to given memory region.
4115  void Free(const VmaAllocation allocation);
4116  void FreeAtOffset(VkDeviceSize offset);
4117 
4118 private:
4119  VkDeviceSize m_Size;
4120  uint32_t m_FreeCount;
4121  VkDeviceSize m_SumFreeSize;
4122  VmaSuballocationList m_Suballocations;
4123  // Suballocations that are free and have size greater than a certain threshold.
4124  // Sorted by size, ascending.
4125  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4126 
4127  bool ValidateFreeSuballocationList() const;
4128 
4129  // Checks if requested suballocation with given parameters can be placed at given suballocItem.
4130  // If yes, fills pOffset and returns true. If no, returns false.
4131  bool CheckAllocation(
4132  uint32_t currentFrameIndex,
4133  uint32_t frameInUseCount,
4134  VkDeviceSize bufferImageGranularity,
4135  VkDeviceSize allocSize,
4136  VkDeviceSize allocAlignment,
4137  VmaSuballocationType allocType,
4138  VmaSuballocationList::const_iterator suballocItem,
4139  bool canMakeOtherLost,
4140  VkDeviceSize* pOffset,
4141  size_t* itemsToMakeLostCount,
4142  VkDeviceSize* pSumFreeSize,
4143  VkDeviceSize* pSumItemSize) const;
4144  // Merges the given free suballocation with the following one, which must also be free.
4145  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4146  // Releases given suballocation, making it free.
4147  // Merges it with adjacent free suballocations if applicable.
4148  // Returns iterator to new free suballocation at this place.
4149  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4150  // Inserts the given free suballocation into the sorted list
4151  // m_FreeSuballocationsBySize, if it is large enough to qualify.
4152  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4153  // Removes the given free suballocation from the sorted list
4154  // m_FreeSuballocationsBySize, if it was registered there.
4155  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4156 };
4157 
4158 /*
4159 Represents a single block of device memory (`VkDeviceMemory`) with all the
4160 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4161 
4162 Thread-safety: This class must be externally synchronized.
4163 */
4164 class VmaDeviceMemoryBlock
4165 {
4166  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
4167 public:
4168  VmaBlockMetadata m_Metadata;
4169 
4170  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
4171 
4172  ~VmaDeviceMemoryBlock()
4173  {
4174  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
4175  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4176  }
4177 
4178  // Always call after construction.
4179  void Init(
4180  uint32_t newMemoryTypeIndex,
4181  VkDeviceMemory newMemory,
4182  VkDeviceSize newSize,
4183  uint32_t id);
4184  // Always call before destruction.
4185  void Destroy(VmaAllocator allocator);
4186 
4187  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
4188  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4189  uint32_t GetId() const { return m_Id; }
4190  void* GetMappedData() const { return m_pMappedData; }
4191 
4192  // Validates all data structures inside this object. If not valid, returns false.
4193  bool Validate() const;
4194 
4195  // ppData can be null.
4196  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
4197  void Unmap(VmaAllocator hAllocator, uint32_t count);
4198 
4199  VkResult BindBufferMemory(
4200  const VmaAllocator hAllocator,
4201  const VmaAllocation hAllocation,
4202  VkBuffer hBuffer);
4203  VkResult BindImageMemory(
4204  const VmaAllocator hAllocator,
4205  const VmaAllocation hAllocation,
4206  VkImage hImage);
4207 
4208 private:
4209  uint32_t m_MemoryTypeIndex;
4210  uint32_t m_Id;
4211  VkDeviceMemory m_hMemory;
4212 
4213  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
4214  // Also protects m_MapCount, m_pMappedData.
4215  VMA_MUTEX m_Mutex;
4216  uint32_t m_MapCount;
4217  void* m_pMappedData;
4218 };
4219 
4220 struct VmaPointerLess
4221 {
4222  bool operator()(const void* lhs, const void* rhs) const
4223  {
4224  return lhs < rhs;
4225  }
4226 };
4227 
4228 class VmaDefragmentator;
4229 
4230 /*
4231 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4232 Vulkan memory type.
4233 
4234 Synchronized internally with a mutex.
4235 */
4236 struct VmaBlockVector
4237 {
4238  VMA_CLASS_NO_COPY(VmaBlockVector)
4239 public:
4240  VmaBlockVector(
4241  VmaAllocator hAllocator,
4242  uint32_t memoryTypeIndex,
4243  VkDeviceSize preferredBlockSize,
4244  size_t minBlockCount,
4245  size_t maxBlockCount,
4246  VkDeviceSize bufferImageGranularity,
4247  uint32_t frameInUseCount,
4248  bool isCustomPool);
4249  ~VmaBlockVector();
4250 
4251  VkResult CreateMinBlocks();
4252 
4253  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4254  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
4255  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
4256  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
4257 
4258  void GetPoolStats(VmaPoolStats* pStats);
4259 
4260  bool IsEmpty() const { return m_Blocks.empty(); }
4261 
4262  VkResult Allocate(
4263  VmaPool hCurrentPool,
4264  uint32_t currentFrameIndex,
4265  VkDeviceSize size,
4266  VkDeviceSize alignment,
4267  const VmaAllocationCreateInfo& createInfo,
4268  VmaSuballocationType suballocType,
4269  VmaAllocation* pAllocation);
4270 
4271  void Free(
4272  VmaAllocation hAllocation);
4273 
4274  // Adds statistics of this BlockVector to pStats.
4275  void AddStats(VmaStats* pStats);
4276 
4277 #if VMA_STATS_STRING_ENABLED
4278  void PrintDetailedMap(class VmaJsonWriter& json);
4279 #endif
4280 
4281  void MakePoolAllocationsLost(
4282  uint32_t currentFrameIndex,
4283  size_t* pLostAllocationCount);
4284 
4285  VmaDefragmentator* EnsureDefragmentator(
4286  VmaAllocator hAllocator,
4287  uint32_t currentFrameIndex);
4288 
4289  VkResult Defragment(
4290  VmaDefragmentationStats* pDefragmentationStats,
4291  VkDeviceSize& maxBytesToMove,
4292  uint32_t& maxAllocationsToMove);
4293 
4294  void DestroyDefragmentator();
4295 
4296 private:
4297  friend class VmaDefragmentator;
4298 
4299  const VmaAllocator m_hAllocator;
4300  const uint32_t m_MemoryTypeIndex;
4301  const VkDeviceSize m_PreferredBlockSize;
4302  const size_t m_MinBlockCount;
4303  const size_t m_MaxBlockCount;
4304  const VkDeviceSize m_BufferImageGranularity;
4305  const uint32_t m_FrameInUseCount;
4306  const bool m_IsCustomPool;
4307  VMA_MUTEX m_Mutex;
4308  // Incrementally sorted by sumFreeSize, ascending.
4309  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
4310  /* There can be at most one block that is completely empty - a
4311  hysteresis to avoid the pessimistic case of alternating creation and
4312  destruction of a VkDeviceMemory. */
4313  bool m_HasEmptyBlock;
4314  VmaDefragmentator* m_pDefragmentator;
4315  uint32_t m_NextBlockId;
4316 
4317  VkDeviceSize CalcMaxBlockSize() const;
4318 
4319  // Finds and removes given block from vector.
4320  void Remove(VmaDeviceMemoryBlock* pBlock);
4321 
4322  // Performs single step in sorting m_Blocks. They may not be fully sorted
4323  // after this call.
4324  void IncrementallySortBlocks();
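 // A minimal sketch of what one such step could look like (assumed
 // implementation, for illustration only) - a single bubble-sort pass that
 // swaps at most one adjacent pair, so repeated calls converge to ascending
 // order by sumFreeSize without ever paying for a full sort:
 //
 //   for(size_t i = 1; i < m_Blocks.size(); ++i)
 //       if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() >
 //          m_Blocks[i]->m_Metadata.GetSumFreeSize())
 //       {
 //           VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
 //           break;
 //       }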
4325 
4326  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4327 };
4328 
4329 struct VmaPool_T
4330 {
4331  VMA_CLASS_NO_COPY(VmaPool_T)
4332 public:
4333  VmaBlockVector m_BlockVector;
4334 
4335  VmaPool_T(
4336  VmaAllocator hAllocator,
4337  const VmaPoolCreateInfo& createInfo);
4338  ~VmaPool_T();
4339 
4340  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4341  uint32_t GetId() const { return m_Id; }
4342  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
4343 
4344 #if VMA_STATS_STRING_ENABLED
4345  //void PrintDetailedMap(class VmaStringBuilder& sb);
4346 #endif
4347 
4348 private:
4349  uint32_t m_Id;
4350 };
4351 
4352 class VmaDefragmentator
4353 {
4354  VMA_CLASS_NO_COPY(VmaDefragmentator)
4355 private:
4356  const VmaAllocator m_hAllocator;
4357  VmaBlockVector* const m_pBlockVector;
4358  uint32_t m_CurrentFrameIndex;
4359  VkDeviceSize m_BytesMoved;
4360  uint32_t m_AllocationsMoved;
4361 
4362  struct AllocationInfo
4363  {
4364  VmaAllocation m_hAllocation;
4365  VkBool32* m_pChanged;
4366 
4367  AllocationInfo() :
4368  m_hAllocation(VK_NULL_HANDLE),
4369  m_pChanged(VMA_NULL)
4370  {
4371  }
4372  };
4373 
4374  struct AllocationInfoSizeGreater
4375  {
4376  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4377  {
4378  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4379  }
4380  };
4381 
4382  // Used between AddAllocation and Defragment.
4383  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4384 
4385  struct BlockInfo
4386  {
4387  VmaDeviceMemoryBlock* m_pBlock;
4388  bool m_HasNonMovableAllocations;
4389  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4390 
4391  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4392  m_pBlock(VMA_NULL),
4393  m_HasNonMovableAllocations(true),
4394  m_Allocations(pAllocationCallbacks),
4395  m_pMappedDataForDefragmentation(VMA_NULL)
4396  {
4397  }
4398 
4399  void CalcHasNonMovableAllocations()
4400  {
4401  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4402  const size_t defragmentAllocCount = m_Allocations.size();
4403  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4404  }
4405 
4406  void SortAllocationsBySizeDescecnding()
4407  {
4408  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4409  }
4410 
4411  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4412  void Unmap(VmaAllocator hAllocator);
4413 
4414  private:
4415  // Not null if mapped for defragmentation only, not originally mapped.
4416  void* m_pMappedDataForDefragmentation;
4417  };
4418 
4419  struct BlockPointerLess
4420  {
4421  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4422  {
4423  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4424  }
4425  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4426  {
4427  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4428  }
4429  };
4430 
4431  // 1. Blocks with some non-movable allocations go first.
4432  // 2. Blocks with smaller sumFreeSize go first.
4433  struct BlockInfoCompareMoveDestination
4434  {
4435  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4436  {
4437  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4438  {
4439  return true;
4440  }
4441  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4442  {
4443  return false;
4444  }
4445  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4446  {
4447  return true;
4448  }
4449  return false;
4450  }
4451  };
4452 
4453  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4454  BlockInfoVector m_Blocks;
4455 
4456  VkResult DefragmentRound(
4457  VkDeviceSize maxBytesToMove,
4458  uint32_t maxAllocationsToMove);
4459 
4460  static bool MoveMakesSense(
4461  size_t dstBlockIndex, VkDeviceSize dstOffset,
4462  size_t srcBlockIndex, VkDeviceSize srcOffset);
4463 
4464 public:
4465  VmaDefragmentator(
4466  VmaAllocator hAllocator,
4467  VmaBlockVector* pBlockVector,
4468  uint32_t currentFrameIndex);
4469 
4470  ~VmaDefragmentator();
4471 
4472  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4473  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4474 
4475  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4476 
4477  VkResult Defragment(
4478  VkDeviceSize maxBytesToMove,
4479  uint32_t maxAllocationsToMove);
4480 };
4481 
4482 // Main allocator object.
4483 struct VmaAllocator_T
4484 {
4485  VMA_CLASS_NO_COPY(VmaAllocator_T)
4486 public:
4487  bool m_UseMutex;
4488  bool m_UseKhrDedicatedAllocation;
4489  VkDevice m_hDevice;
4490  bool m_AllocationCallbacksSpecified;
4491  VkAllocationCallbacks m_AllocationCallbacks;
4492  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4493 
4494  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4495  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4496  VMA_MUTEX m_HeapSizeLimitMutex;
4497 
4498  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4499  VkPhysicalDeviceMemoryProperties m_MemProps;
4500 
4501  // Default pools.
4502  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4503 
4504  // Each vector is sorted by memory (handle value).
4505  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4506  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4507  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4508 
4509  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4510  ~VmaAllocator_T();
4511 
4512  const VkAllocationCallbacks* GetAllocationCallbacks() const
4513  {
4514  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4515  }
4516  const VmaVulkanFunctions& GetVulkanFunctions() const
4517  {
4518  return m_VulkanFunctions;
4519  }
4520 
4521  VkDeviceSize GetBufferImageGranularity() const
4522  {
4523  return VMA_MAX(
4524  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4525  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4526  }
4527 
4528  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4529  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4530 
4531  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4532  {
4533  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4534  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4535  }
4536  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
4537  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
4538  {
4539  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
4540  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
4541  }
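 // Worked example of the mask check above, using flag values from the
 // Vulkan spec: for a HOST_VISIBLE|HOST_CACHED type (0x2|0x8),
 // propertyFlags & 0x6 yields 0x2 == HOST_VISIBLE_BIT, so the type is
 // reported as non-coherent; for HOST_VISIBLE|HOST_COHERENT (0x2|0x4) the
 // mask yields 0x6 and the comparison fails.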
4542  // Minimum alignment for all allocations in specific memory type.
4543  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
4544  {
4545  return IsMemoryTypeNonCoherent(memTypeIndex) ?
4546  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
4547  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
4548  }
4549 
4550  bool IsIntegratedGpu() const
4551  {
4552  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
4553  }
4554 
4555  void GetBufferMemoryRequirements(
4556  VkBuffer hBuffer,
4557  VkMemoryRequirements& memReq,
4558  bool& requiresDedicatedAllocation,
4559  bool& prefersDedicatedAllocation) const;
4560  void GetImageMemoryRequirements(
4561  VkImage hImage,
4562  VkMemoryRequirements& memReq,
4563  bool& requiresDedicatedAllocation,
4564  bool& prefersDedicatedAllocation) const;
4565 
4566  // Main allocation function.
4567  VkResult AllocateMemory(
4568  const VkMemoryRequirements& vkMemReq,
4569  bool requiresDedicatedAllocation,
4570  bool prefersDedicatedAllocation,
4571  VkBuffer dedicatedBuffer,
4572  VkImage dedicatedImage,
4573  const VmaAllocationCreateInfo& createInfo,
4574  VmaSuballocationType suballocType,
4575  VmaAllocation* pAllocation);
4576 
4577  // Main deallocation function.
4578  void FreeMemory(const VmaAllocation allocation);
4579 
4580  void CalculateStats(VmaStats* pStats);
4581 
4582 #if VMA_STATS_STRING_ENABLED
4583  void PrintDetailedMap(class VmaJsonWriter& json);
4584 #endif
4585 
4586  VkResult Defragment(
4587  VmaAllocation* pAllocations,
4588  size_t allocationCount,
4589  VkBool32* pAllocationsChanged,
4590  const VmaDefragmentationInfo* pDefragmentationInfo,
4591  VmaDefragmentationStats* pDefragmentationStats);
4592 
4593  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4594  bool TouchAllocation(VmaAllocation hAllocation);
4595 
4596  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4597  void DestroyPool(VmaPool pool);
4598  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4599 
4600  void SetCurrentFrameIndex(uint32_t frameIndex);
4601 
4602  void MakePoolAllocationsLost(
4603  VmaPool hPool,
4604  size_t* pLostAllocationCount);
4605 
4606  void CreateLostAllocation(VmaAllocation* pAllocation);
4607 
4608  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4609  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4610 
4611  VkResult Map(VmaAllocation hAllocation, void** ppData);
4612  void Unmap(VmaAllocation hAllocation);
4613 
4614  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
4615  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
4616 
4617  void FlushOrInvalidateAllocation(
4618  VmaAllocation hAllocation,
4619  VkDeviceSize offset, VkDeviceSize size,
4620  VMA_CACHE_OPERATION op);
4621 
4622 private:
4623  VkDeviceSize m_PreferredLargeHeapBlockSize;
4624 
4625  VkPhysicalDevice m_PhysicalDevice;
4626  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4627 
4628  VMA_MUTEX m_PoolsMutex;
4629  // Protected by m_PoolsMutex. Sorted by pointer value.
4630  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4631  uint32_t m_NextPoolId;
4632 
4633  VmaVulkanFunctions m_VulkanFunctions;
4634 
4635  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4636 
4637  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4638 
4639  VkResult AllocateMemoryOfType(
4640  VkDeviceSize size,
4641  VkDeviceSize alignment,
4642  bool dedicatedAllocation,
4643  VkBuffer dedicatedBuffer,
4644  VkImage dedicatedImage,
4645  const VmaAllocationCreateInfo& createInfo,
4646  uint32_t memTypeIndex,
4647  VmaSuballocationType suballocType,
4648  VmaAllocation* pAllocation);
4649 
4650  // Allocates and registers new VkDeviceMemory specifically for single allocation.
4651  VkResult AllocateDedicatedMemory(
4652  VkDeviceSize size,
4653  VmaSuballocationType suballocType,
4654  uint32_t memTypeIndex,
4655  bool map,
4656  bool isUserDataString,
4657  void* pUserData,
4658  VkBuffer dedicatedBuffer,
4659  VkImage dedicatedImage,
4660  VmaAllocation* pAllocation);
4661 
4662  // Frees the given allocation as dedicated memory and unregisters it from the list.
4663  void FreeDedicatedMemory(VmaAllocation allocation);
4664 };
4665 
4667 // Memory allocation #2 after VmaAllocator_T definition
4668 
4669 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4670 {
4671  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4672 }
4673 
4674 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4675 {
4676  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4677 }
4678 
4679 template<typename T>
4680 static T* VmaAllocate(VmaAllocator hAllocator)
4681 {
4682  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4683 }
4684 
4685 template<typename T>
4686 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4687 {
4688  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4689 }
4690 
4691 template<typename T>
4692 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4693 {
4694  if(ptr != VMA_NULL)
4695  {
4696  ptr->~T();
4697  VmaFree(hAllocator, ptr);
4698  }
4699 }
4700 
4701 template<typename T>
4702 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4703 {
4704  if(ptr != VMA_NULL)
4705  {
4706  for(size_t i = count; i--; )
4707  ptr[i].~T();
4708  VmaFree(hAllocator, ptr);
4709  }
4710 }
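// A short usage sketch of the helpers above (MyType is hypothetical, not part
// of the library). VmaAllocate only obtains raw memory, so construction must
// be done explicitly, while vma_delete pairs destruction with VmaFree:
//
//   MyType* const p = VmaAllocate<MyType>(hAllocator); // raw storage
//   new(p) MyType();                                   // placement-construct
//   vma_delete(hAllocator, p);                         // ~MyType() + VmaFree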
4711 
4713 // VmaStringBuilder
4714 
4715 #if VMA_STATS_STRING_ENABLED
4716 
4717 class VmaStringBuilder
4718 {
4719 public:
4720  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4721  size_t GetLength() const { return m_Data.size(); }
4722  const char* GetData() const { return m_Data.data(); }
4723 
4724  void Add(char ch) { m_Data.push_back(ch); }
4725  void Add(const char* pStr);
4726  void AddNewLine() { Add('\n'); }
4727  void AddNumber(uint32_t num);
4728  void AddNumber(uint64_t num);
4729  void AddPointer(const void* ptr);
4730 
4731 private:
4732  VmaVector< char, VmaStlAllocator<char> > m_Data;
4733 };
4734 
4735 void VmaStringBuilder::Add(const char* pStr)
4736 {
4737  const size_t strLen = strlen(pStr);
4738  if(strLen > 0)
4739  {
4740  const size_t oldCount = m_Data.size();
4741  m_Data.resize(oldCount + strLen);
4742  memcpy(m_Data.data() + oldCount, pStr, strLen);
4743  }
4744 }
4745 
4746 void VmaStringBuilder::AddNumber(uint32_t num)
4747 {
4748  char buf[11];
4749  VmaUint32ToStr(buf, sizeof(buf), num);
4750  Add(buf);
4751 }
4752 
4753 void VmaStringBuilder::AddNumber(uint64_t num)
4754 {
4755  char buf[21];
4756  VmaUint64ToStr(buf, sizeof(buf), num);
4757  Add(buf);
4758 }
4759 
4760 void VmaStringBuilder::AddPointer(const void* ptr)
4761 {
4762  char buf[21];
4763  VmaPtrToStr(buf, sizeof(buf), ptr);
4764  Add(buf);
4765 }
4766 
4767 #endif // #if VMA_STATS_STRING_ENABLED
4768 
4770 // VmaJsonWriter
4771 
4772 #if VMA_STATS_STRING_ENABLED
4773 
4774 class VmaJsonWriter
4775 {
4776  VMA_CLASS_NO_COPY(VmaJsonWriter)
4777 public:
4778  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4779  ~VmaJsonWriter();
4780 
4781  void BeginObject(bool singleLine = false);
4782  void EndObject();
4783 
4784  void BeginArray(bool singleLine = false);
4785  void EndArray();
4786 
4787  void WriteString(const char* pStr);
4788  void BeginString(const char* pStr = VMA_NULL);
4789  void ContinueString(const char* pStr);
4790  void ContinueString(uint32_t n);
4791  void ContinueString(uint64_t n);
4792  void ContinueString_Pointer(const void* ptr);
4793  void EndString(const char* pStr = VMA_NULL);
4794 
4795  void WriteNumber(uint32_t n);
4796  void WriteNumber(uint64_t n);
4797  void WriteBool(bool b);
4798  void WriteNull();
4799 
4800 private:
4801  static const char* const INDENT;
4802 
4803  enum COLLECTION_TYPE
4804  {
4805  COLLECTION_TYPE_OBJECT,
4806  COLLECTION_TYPE_ARRAY,
4807  };
4808  struct StackItem
4809  {
4810  COLLECTION_TYPE type;
4811  uint32_t valueCount;
4812  bool singleLineMode;
4813  };
4814 
4815  VmaStringBuilder& m_SB;
4816  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4817  bool m_InsideString;
4818 
4819  void BeginValue(bool isString);
4820  void WriteIndent(bool oneLess = false);
4821 };
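// A minimal usage sketch (the key and value are invented for the example).
// Inside an object, keys and values are written alternately as string/value
// pairs; BeginValue() relies on this alternation when choosing between
// emitting ": " and ", ":
//
//   json.BeginObject();
//   json.WriteString("Count"); // key
//   json.WriteNumber(42u);     // value
//   json.EndObject();
//
// which produces, modulo indentation: { "Count": 42 }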
4822 
4823 const char* const VmaJsonWriter::INDENT = "  ";
4824 
4825 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4826  m_SB(sb),
4827  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4828  m_InsideString(false)
4829 {
4830 }
4831 
4832 VmaJsonWriter::~VmaJsonWriter()
4833 {
4834  VMA_ASSERT(!m_InsideString);
4835  VMA_ASSERT(m_Stack.empty());
4836 }
4837 
4838 void VmaJsonWriter::BeginObject(bool singleLine)
4839 {
4840  VMA_ASSERT(!m_InsideString);
4841 
4842  BeginValue(false);
4843  m_SB.Add('{');
4844 
4845  StackItem item;
4846  item.type = COLLECTION_TYPE_OBJECT;
4847  item.valueCount = 0;
4848  item.singleLineMode = singleLine;
4849  m_Stack.push_back(item);
4850 }
4851 
4852 void VmaJsonWriter::EndObject()
4853 {
4854  VMA_ASSERT(!m_InsideString);
4855 
4856  WriteIndent(true);
4857  m_SB.Add('}');
4858 
4859  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4860  m_Stack.pop_back();
4861 }
4862 
4863 void VmaJsonWriter::BeginArray(bool singleLine)
4864 {
4865  VMA_ASSERT(!m_InsideString);
4866 
4867  BeginValue(false);
4868  m_SB.Add('[');
4869 
4870  StackItem item;
4871  item.type = COLLECTION_TYPE_ARRAY;
4872  item.valueCount = 0;
4873  item.singleLineMode = singleLine;
4874  m_Stack.push_back(item);
4875 }
4876 
4877 void VmaJsonWriter::EndArray()
4878 {
4879  VMA_ASSERT(!m_InsideString);
4880 
4881  WriteIndent(true);
4882  m_SB.Add(']');
4883 
4884  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4885  m_Stack.pop_back();
4886 }
4887 
4888 void VmaJsonWriter::WriteString(const char* pStr)
4889 {
4890  BeginString(pStr);
4891  EndString();
4892 }
4893 
4894 void VmaJsonWriter::BeginString(const char* pStr)
4895 {
4896  VMA_ASSERT(!m_InsideString);
4897 
4898  BeginValue(true);
4899  m_SB.Add('"');
4900  m_InsideString = true;
4901  if(pStr != VMA_NULL && pStr[0] != '\0')
4902  {
4903  ContinueString(pStr);
4904  }
4905 }
4906 
4907 void VmaJsonWriter::ContinueString(const char* pStr)
4908 {
4909  VMA_ASSERT(m_InsideString);
4910 
4911  const size_t strLen = strlen(pStr);
4912  for(size_t i = 0; i < strLen; ++i)
4913  {
4914  char ch = pStr[i];
4915  if(ch == '\\')
4916  {
4917  m_SB.Add("\\\\");
4918  }
4919  else if(ch == '"')
4920  {
4921  m_SB.Add("\\\"");
4922  }
4923  else if(ch >= 32)
4924  {
4925  m_SB.Add(ch);
4926  }
4927  else switch(ch)
4928  {
4929  case '\b':
4930  m_SB.Add("\\b");
4931  break;
4932  case '\f':
4933  m_SB.Add("\\f");
4934  break;
4935  case '\n':
4936  m_SB.Add("\\n");
4937  break;
4938  case '\r':
4939  m_SB.Add("\\r");
4940  break;
4941  case '\t':
4942  m_SB.Add("\\t");
4943  break;
4944  default:
4945  VMA_ASSERT(0 && "Character not currently supported.");
4946  break;
4947  }
4948  }
4949 }
4950 
4951 void VmaJsonWriter::ContinueString(uint32_t n)
4952 {
4953  VMA_ASSERT(m_InsideString);
4954  m_SB.AddNumber(n);
4955 }
4956 
4957 void VmaJsonWriter::ContinueString(uint64_t n)
4958 {
4959  VMA_ASSERT(m_InsideString);
4960  m_SB.AddNumber(n);
4961 }
4962 
4963 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4964 {
4965  VMA_ASSERT(m_InsideString);
4966  m_SB.AddPointer(ptr);
4967 }
4968 
4969 void VmaJsonWriter::EndString(const char* pStr)
4970 {
4971  VMA_ASSERT(m_InsideString);
4972  if(pStr != VMA_NULL && pStr[0] != '\0')
4973  {
4974  ContinueString(pStr);
4975  }
4976  m_SB.Add('"');
4977  m_InsideString = false;
4978 }
4979 
4980 void VmaJsonWriter::WriteNumber(uint32_t n)
4981 {
4982  VMA_ASSERT(!m_InsideString);
4983  BeginValue(false);
4984  m_SB.AddNumber(n);
4985 }
4986 
4987 void VmaJsonWriter::WriteNumber(uint64_t n)
4988 {
4989  VMA_ASSERT(!m_InsideString);
4990  BeginValue(false);
4991  m_SB.AddNumber(n);
4992 }
4993 
4994 void VmaJsonWriter::WriteBool(bool b)
4995 {
4996  VMA_ASSERT(!m_InsideString);
4997  BeginValue(false);
4998  m_SB.Add(b ? "true" : "false");
4999 }
5000 
5001 void VmaJsonWriter::WriteNull()
5002 {
5003  VMA_ASSERT(!m_InsideString);
5004  BeginValue(false);
5005  m_SB.Add("null");
5006 }
5007 
5008 void VmaJsonWriter::BeginValue(bool isString)
5009 {
5010  if(!m_Stack.empty())
5011  {
5012  StackItem& currItem = m_Stack.back();
5013  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5014  currItem.valueCount % 2 == 0)
5015  {
5016  VMA_ASSERT(isString);
5017  }
5018 
5019  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5020  currItem.valueCount % 2 != 0)
5021  {
5022  m_SB.Add(": ");
5023  }
5024  else if(currItem.valueCount > 0)
5025  {
5026  m_SB.Add(", ");
5027  WriteIndent();
5028  }
5029  else
5030  {
5031  WriteIndent();
5032  }
5033  ++currItem.valueCount;
5034  }
5035 }
5036 
5037 void VmaJsonWriter::WriteIndent(bool oneLess)
5038 {
5039  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
5040  {
5041  m_SB.AddNewLine();
5042 
5043  size_t count = m_Stack.size();
5044  if(count > 0 && oneLess)
5045  {
5046  --count;
5047  }
5048  for(size_t i = 0; i < count; ++i)
5049  {
5050  m_SB.Add(INDENT);
5051  }
5052  }
5053 }
5054 
5055 #endif // #if VMA_STATS_STRING_ENABLED
5056 
5058 
5059 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
5060 {
5061  if(IsUserDataString())
5062  {
5063  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
5064 
5065  FreeUserDataString(hAllocator);
5066 
5067  if(pUserData != VMA_NULL)
5068  {
5069  const char* const newStrSrc = (char*)pUserData;
5070  const size_t newStrLen = strlen(newStrSrc);
5071  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
5072  memcpy(newStrDst, newStrSrc, newStrLen + 1);
5073  m_pUserData = newStrDst;
5074  }
5075  }
5076  else
5077  {
5078  m_pUserData = pUserData;
5079  }
5080 }
5081 
5082 void VmaAllocation_T::ChangeBlockAllocation(
5083  VmaAllocator hAllocator,
5084  VmaDeviceMemoryBlock* block,
5085  VkDeviceSize offset)
5086 {
5087  VMA_ASSERT(block != VMA_NULL);
5088  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5089 
5090  // Move mapping reference counter from old block to new block.
5091  if(block != m_BlockAllocation.m_Block)
5092  {
5093  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
5094  if(IsPersistentMap())
5095  ++mapRefCount;
5096  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
5097  block->Map(hAllocator, mapRefCount, VMA_NULL);
5098  }
5099 
5100  m_BlockAllocation.m_Block = block;
5101  m_BlockAllocation.m_Offset = offset;
5102 }
5103 
5104 VkDeviceSize VmaAllocation_T::GetOffset() const
5105 {
5106  switch(m_Type)
5107  {
5108  case ALLOCATION_TYPE_BLOCK:
5109  return m_BlockAllocation.m_Offset;
5110  case ALLOCATION_TYPE_DEDICATED:
5111  return 0;
5112  default:
5113  VMA_ASSERT(0);
5114  return 0;
5115  }
5116 }
5117 
5118 VkDeviceMemory VmaAllocation_T::GetMemory() const
5119 {
5120  switch(m_Type)
5121  {
5122  case ALLOCATION_TYPE_BLOCK:
5123  return m_BlockAllocation.m_Block->GetDeviceMemory();
5124  case ALLOCATION_TYPE_DEDICATED:
5125  return m_DedicatedAllocation.m_hMemory;
5126  default:
5127  VMA_ASSERT(0);
5128  return VK_NULL_HANDLE;
5129  }
5130 }
5131 
5132 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5133 {
5134  switch(m_Type)
5135  {
5136  case ALLOCATION_TYPE_BLOCK:
5137  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5138  case ALLOCATION_TYPE_DEDICATED:
5139  return m_DedicatedAllocation.m_MemoryTypeIndex;
5140  default:
5141  VMA_ASSERT(0);
5142  return UINT32_MAX;
5143  }
5144 }
5145 
5146 void* VmaAllocation_T::GetMappedData() const
5147 {
5148  switch(m_Type)
5149  {
5150  case ALLOCATION_TYPE_BLOCK:
5151  if(m_MapCount != 0)
5152  {
5153  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5154  VMA_ASSERT(pBlockData != VMA_NULL);
5155  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5156  }
5157  else
5158  {
5159  return VMA_NULL;
5160  }
5161  break;
5162  case ALLOCATION_TYPE_DEDICATED:
5163  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5164  return m_DedicatedAllocation.m_pMappedData;
5165  default:
5166  VMA_ASSERT(0);
5167  return VMA_NULL;
5168  }
5169 }
5170 
5171 bool VmaAllocation_T::CanBecomeLost() const
5172 {
5173  switch(m_Type)
5174  {
5175  case ALLOCATION_TYPE_BLOCK:
5176  return m_BlockAllocation.m_CanBecomeLost;
5177  case ALLOCATION_TYPE_DEDICATED:
5178  return false;
5179  default:
5180  VMA_ASSERT(0);
5181  return false;
5182  }
5183 }
5184 
5185 VmaPool VmaAllocation_T::GetPool() const
5186 {
5187  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5188  return m_BlockAllocation.m_hPool;
5189 }
5190 
5191 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5192 {
5193  VMA_ASSERT(CanBecomeLost());
5194 
5195  /*
5196  Warning: This is a carefully designed algorithm.
5197  Do not modify unless you really know what you're doing :)
5198  */
5199  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
5200  for(;;)
5201  {
5202  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
5203  {
5204  VMA_ASSERT(0);
5205  return false;
5206  }
5207  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
5208  {
5209  return false;
5210  }
5211  else // Last use time earlier than current time.
5212  {
5213  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
5214  {
5215  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
5216  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
5217  return true;
5218  }
5219  }
5220  }
5221 }
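// The loop above is a standard compare-and-swap retry: compare_exchange_weak
// may fail spuriously or because another thread advanced the frame index; on
// failure it reloads localLastUseFrameIndex with the current value and the
// decision is re-evaluated, so an allocation touched concurrently is never
// made lost by mistake.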
5222 
5223 #if VMA_STATS_STRING_ENABLED
5224 
5225 // Entries correspond to values of enum VmaSuballocationType.
5226 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
5227  "FREE",
5228  "UNKNOWN",
5229  "BUFFER",
5230  "IMAGE_UNKNOWN",
5231  "IMAGE_LINEAR",
5232  "IMAGE_OPTIMAL",
5233 };
5234 
5235 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
5236 {
5237  json.WriteString("Type");
5238  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
5239 
5240  json.WriteString("Size");
5241  json.WriteNumber(m_Size);
5242 
5243  if(m_pUserData != VMA_NULL)
5244  {
5245  json.WriteString("UserData");
5246  if(IsUserDataString())
5247  {
5248  json.WriteString((const char*)m_pUserData);
5249  }
5250  else
5251  {
5252  json.BeginString();
5253  json.ContinueString_Pointer(m_pUserData);
5254  json.EndString();
5255  }
5256  }
5257 
5258  json.WriteString("CreationFrameIndex");
5259  json.WriteNumber(m_CreationFrameIndex);
5260 
5261  json.WriteString("LastUseFrameIndex");
5262  json.WriteNumber(GetLastUseFrameIndex());
5263 
5264  if(m_BufferImageUsage != 0)
5265  {
5266  json.WriteString("Usage");
5267  json.WriteNumber(m_BufferImageUsage);
5268  }
5269 }
5270 
5271 #endif
5272 
5273 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
5274 {
5275  VMA_ASSERT(IsUserDataString());
5276  if(m_pUserData != VMA_NULL)
5277  {
5278  char* const oldStr = (char*)m_pUserData;
5279  const size_t oldStrLen = strlen(oldStr);
5280  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
5281  m_pUserData = VMA_NULL;
5282  }
5283 }
5284 
5285 void VmaAllocation_T::BlockAllocMap()
5286 {
5287  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5288 
5289  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5290  {
5291  ++m_MapCount;
5292  }
5293  else
5294  {
5295  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
5296  }
5297 }
5298 
5299 void VmaAllocation_T::BlockAllocUnmap()
5300 {
5301  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5302 
5303  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5304  {
5305  --m_MapCount;
5306  }
5307  else
5308  {
5309  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
5310  }
5311 }
5312 
5313 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
5314 {
5315  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5316 
5317  if(m_MapCount != 0)
5318  {
5319  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5320  {
5321  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
5322  *ppData = m_DedicatedAllocation.m_pMappedData;
5323  ++m_MapCount;
5324  return VK_SUCCESS;
5325  }
5326  else
5327  {
5328  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
5329  return VK_ERROR_MEMORY_MAP_FAILED;
5330  }
5331  }
5332  else
5333  {
5334  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5335  hAllocator->m_hDevice,
5336  m_DedicatedAllocation.m_hMemory,
5337  0, // offset
5338  VK_WHOLE_SIZE,
5339  0, // flags
5340  ppData);
5341  if(result == VK_SUCCESS)
5342  {
5343  m_DedicatedAllocation.m_pMappedData = *ppData;
5344  m_MapCount = 1;
5345  }
5346  return result;
5347  }
5348 }
5349 
5350 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
5351 {
5352  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5353 
5354  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5355  {
5356  --m_MapCount;
5357  if(m_MapCount == 0)
5358  {
5359  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
5360  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
5361  hAllocator->m_hDevice,
5362  m_DedicatedAllocation.m_hMemory);
5363  }
5364  }
5365  else
5366  {
5367  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
5368  }
5369 }
5370 
5371 #if VMA_STATS_STRING_ENABLED
5372 
5373 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
5374 {
5375  json.BeginObject();
5376 
5377  json.WriteString("Blocks");
5378  json.WriteNumber(stat.blockCount);
5379 
5380  json.WriteString("Allocations");
5381  json.WriteNumber(stat.allocationCount);
5382 
5383  json.WriteString("UnusedRanges");
5384  json.WriteNumber(stat.unusedRangeCount);
5385 
5386  json.WriteString("UsedBytes");
5387  json.WriteNumber(stat.usedBytes);
5388 
5389  json.WriteString("UnusedBytes");
5390  json.WriteNumber(stat.unusedBytes);
5391 
5392  if(stat.allocationCount > 1)
5393  {
5394  json.WriteString("AllocationSize");
5395  json.BeginObject(true);
5396  json.WriteString("Min");
5397  json.WriteNumber(stat.allocationSizeMin);
5398  json.WriteString("Avg");
5399  json.WriteNumber(stat.allocationSizeAvg);
5400  json.WriteString("Max");
5401  json.WriteNumber(stat.allocationSizeMax);
5402  json.EndObject();
5403  }
5404 
5405  if(stat.unusedRangeCount > 1)
5406  {
5407  json.WriteString("UnusedRangeSize");
5408  json.BeginObject(true);
5409  json.WriteString("Min");
5410  json.WriteNumber(stat.unusedRangeSizeMin);
5411  json.WriteString("Avg");
5412  json.WriteNumber(stat.unusedRangeSizeAvg);
5413  json.WriteString("Max");
5414  json.WriteNumber(stat.unusedRangeSizeMax);
5415  json.EndObject();
5416  }
5417 
5418  json.EndObject();
5419 }
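// The function above emits JSON of roughly this shape (all values are made
// up for illustration):
//
//   { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
//     "UsedBytes": 1048576, "UnusedBytes": 4096,
//     "AllocationSize": { "Min": 262144, "Avg": 524288, "Max": 786432 } }
//
// The "AllocationSize" and "UnusedRangeSize" sub-objects are written only
// when there is more than one allocation or unused range, respectively.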
5420 
5421 #endif // #if VMA_STATS_STRING_ENABLED
5422 
5423 struct VmaSuballocationItemSizeLess
5424 {
5425  bool operator()(
5426  const VmaSuballocationList::iterator lhs,
5427  const VmaSuballocationList::iterator rhs) const
5428  {
5429  return lhs->size < rhs->size;
5430  }
5431  bool operator()(
5432  const VmaSuballocationList::iterator lhs,
5433  VkDeviceSize rhsSize) const
5434  {
5435  return lhs->size < rhsSize;
5436  }
5437 };
5438 
5440 // class VmaBlockMetadata
5441 
5442 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5443  m_Size(0),
5444  m_FreeCount(0),
5445  m_SumFreeSize(0),
5446  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5447  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5448 {
5449 }
5450 
5451 VmaBlockMetadata::~VmaBlockMetadata()
5452 {
5453 }
5454 
5455 void VmaBlockMetadata::Init(VkDeviceSize size)
5456 {
5457  m_Size = size;
5458  m_FreeCount = 1;
5459  m_SumFreeSize = size;
5460 
5461  VmaSuballocation suballoc = {};
5462  suballoc.offset = 0;
5463  suballoc.size = size;
5464  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5465  suballoc.hAllocation = VK_NULL_HANDLE;
5466 
5467  m_Suballocations.push_back(suballoc);
5468  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5469  --suballocItem;
5470  m_FreeSuballocationsBySize.push_back(suballocItem);
5471 }
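// After Init() the metadata describes exactly one suballocation - a single
// FREE range spanning the whole block - which is also the state that
// IsEmpty() below tests for:
//
//   |<------------------------- size ------------------------->|
//   |  FREE  (offset == 0, hAllocation == VK_NULL_HANDLE)      |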
5472 
5473 bool VmaBlockMetadata::Validate() const
5474 {
5475  if(m_Suballocations.empty())
5476  {
5477  return false;
5478  }
5479 
5480  // Expected offset of new suballocation as calculated from previous ones.
5481  VkDeviceSize calculatedOffset = 0;
5482  // Expected number of free suballocations as calculated from traversing their list.
5483  uint32_t calculatedFreeCount = 0;
5484  // Expected sum size of free suballocations as calculated from traversing their list.
5485  VkDeviceSize calculatedSumFreeSize = 0;
5486  // Expected number of free suballocations that should be registered in
5487  // m_FreeSuballocationsBySize, as calculated from traversing their list.
5488  size_t freeSuballocationsToRegister = 0;
5489  // True if the previously visited suballocation was free.
5490  bool prevFree = false;
5491 
5492  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5493  suballocItem != m_Suballocations.cend();
5494  ++suballocItem)
5495  {
5496  const VmaSuballocation& subAlloc = *suballocItem;
5497 
5498  // Actual offset of this suballocation doesn't match expected one.
5499  if(subAlloc.offset != calculatedOffset)
5500  {
5501  return false;
5502  }
5503 
5504  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5505  // Two adjacent free suballocations are invalid. They should be merged.
5506  if(prevFree && currFree)
5507  {
5508  return false;
5509  }
5510 
5511  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5512  {
5513  return false;
5514  }
5515 
5516  if(currFree)
5517  {
5518  calculatedSumFreeSize += subAlloc.size;
5519  ++calculatedFreeCount;
5520  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5521  {
5522  ++freeSuballocationsToRegister;
5523  }
5524  }
5525  else
5526  {
5527  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5528  {
5529  return false;
5530  }
5531  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5532  {
5533  return false;
5534  }
5535  }
5536 
5537  calculatedOffset += subAlloc.size;
5538  prevFree = currFree;
5539  }
5540 
5541  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5542  // match expected one.
5543  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5544  {
5545  return false;
5546  }
5547 
5548  VkDeviceSize lastSize = 0;
5549  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5550  {
5551  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5552 
5553  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5554  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5555  {
5556  return false;
5557  }
5558  // They must be sorted by size ascending.
5559  if(suballocItem->size < lastSize)
5560  {
5561  return false;
5562  }
5563 
5564  lastSize = suballocItem->size;
5565  }
5566 
5567  // Check if totals match calculated values.
5568  if(!ValidateFreeSuballocationList() ||
5569  (calculatedOffset != m_Size) ||
5570  (calculatedSumFreeSize != m_SumFreeSize) ||
5571  (calculatedFreeCount != m_FreeCount))
5572  {
5573  return false;
5574  }
5575 
5576  return true;
5577 }
5578 
5579 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5580 {
5581  if(!m_FreeSuballocationsBySize.empty())
5582  {
5583  return m_FreeSuballocationsBySize.back()->size;
5584  }
5585  else
5586  {
5587  return 0;
5588  }
5589 }
5590 
5591 bool VmaBlockMetadata::IsEmpty() const
5592 {
5593  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5594 }
5595 
5596 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5597 {
5598  outInfo.blockCount = 1;
5599 
5600  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5601  outInfo.allocationCount = rangeCount - m_FreeCount;
5602  outInfo.unusedRangeCount = m_FreeCount;
5603 
5604  outInfo.unusedBytes = m_SumFreeSize;
5605  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5606 
5607  outInfo.allocationSizeMin = UINT64_MAX;
5608  outInfo.allocationSizeMax = 0;
5609  outInfo.unusedRangeSizeMin = UINT64_MAX;
5610  outInfo.unusedRangeSizeMax = 0;
5611 
5612  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5613  suballocItem != m_Suballocations.cend();
5614  ++suballocItem)
5615  {
5616  const VmaSuballocation& suballoc = *suballocItem;
5617  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5618  {
5619  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5620  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5621  }
5622  else
5623  {
5624  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5625  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5626  }
5627  }
5628 }
5629 
5630 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5631 {
5632  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5633 
5634  inoutStats.size += m_Size;
5635  inoutStats.unusedSize += m_SumFreeSize;
5636  inoutStats.allocationCount += rangeCount - m_FreeCount;
5637  inoutStats.unusedRangeCount += m_FreeCount;
5638  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5639 }
5640 
5641 #if VMA_STATS_STRING_ENABLED
5642 
5643 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5644 {
5645  json.BeginObject();
5646 
5647  json.WriteString("TotalBytes");
5648  json.WriteNumber(m_Size);
5649 
5650  json.WriteString("UnusedBytes");
5651  json.WriteNumber(m_SumFreeSize);
5652 
5653  json.WriteString("Allocations");
5654  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5655 
5656  json.WriteString("UnusedRanges");
5657  json.WriteNumber(m_FreeCount);
5658 
5659  json.WriteString("Suballocations");
5660  json.BeginArray();
5661  size_t i = 0;
5662  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5663  suballocItem != m_Suballocations.cend();
5664  ++suballocItem, ++i)
5665  {
5666  json.BeginObject(true);
5667 
5668  json.WriteString("Offset");
5669  json.WriteNumber(suballocItem->offset);
5670 
5671  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5672  {
5673  json.WriteString("Type");
5674  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
5675 
5676  json.WriteString("Size");
5677  json.WriteNumber(suballocItem->size);
5678  }
5679  else
5680  {
5681  suballocItem->hAllocation->PrintParameters(json);
5682  }
5683 
5684  json.EndObject();
5685  }
5686  json.EndArray();
5687 
5688  json.EndObject();
5689 }
5690 
5691 #endif // #if VMA_STATS_STRING_ENABLED
5692 
5693 /*
5694 How many suitable free suballocations to analyze before choosing best one.
5695 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
5696  be chosen.
5697 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5698  suballocations will be analyzed and the best one will be chosen.
5699 - Any other value is also acceptable.
5700 */
5701 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
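// An illustrative example of the two strategies (sizes invented): with free
// suballocations of 64 KiB, 256 KiB and 1 MiB registered ascending in
// m_FreeSuballocationsBySize, a 100 KiB request under VMA_BEST_FIT
// binary-searches to the 256 KiB entry (first size not less than the
// request), while the alternative branch in CreateAllocationRequest() below
// starts from the 1 MiB entry instead, leaving medium-sized ranges intact.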
5702 
5703 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5704 {
5705  VMA_ASSERT(IsEmpty());
5706  pAllocationRequest->offset = 0;
5707  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5708  pAllocationRequest->sumItemSize = 0;
5709  pAllocationRequest->item = m_Suballocations.begin();
5710  pAllocationRequest->itemsToMakeLostCount = 0;
5711 }
5712 
5713 bool VmaBlockMetadata::CreateAllocationRequest(
5714  uint32_t currentFrameIndex,
5715  uint32_t frameInUseCount,
5716  VkDeviceSize bufferImageGranularity,
5717  VkDeviceSize allocSize,
5718  VkDeviceSize allocAlignment,
5719  VmaSuballocationType allocType,
5720  bool canMakeOtherLost,
5721  VmaAllocationRequest* pAllocationRequest)
5722 {
5723  VMA_ASSERT(allocSize > 0);
5724  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5725  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5726  VMA_HEAVY_ASSERT(Validate());
5727 
5728  // There is not enough total free space in this block to fulfill the request: early return.
5729  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5730  {
5731  return false;
5732  }
5733 
5734  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5735  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5736  if(freeSuballocCount > 0)
5737  {
5738  if(VMA_BEST_FIT)
5739  {
5740  // Find first free suballocation with size not less than allocSize.
5741  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5742  m_FreeSuballocationsBySize.data(),
5743  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5744  allocSize,
5745  VmaSuballocationItemSizeLess());
5746  size_t index = it - m_FreeSuballocationsBySize.data();
5747  for(; index < freeSuballocCount; ++index)
5748  {
5749  if(CheckAllocation(
5750  currentFrameIndex,
5751  frameInUseCount,
5752  bufferImageGranularity,
5753  allocSize,
5754  allocAlignment,
5755  allocType,
5756  m_FreeSuballocationsBySize[index],
5757  false, // canMakeOtherLost
5758  &pAllocationRequest->offset,
5759  &pAllocationRequest->itemsToMakeLostCount,
5760  &pAllocationRequest->sumFreeSize,
5761  &pAllocationRequest->sumItemSize))
5762  {
5763  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5764  return true;
5765  }
5766  }
5767  }
5768  else
5769  {
5770  // Search starting from the biggest suballocations.
5771  for(size_t index = freeSuballocCount; index--; )
5772  {
5773  if(CheckAllocation(
5774  currentFrameIndex,
5775  frameInUseCount,
5776  bufferImageGranularity,
5777  allocSize,
5778  allocAlignment,
5779  allocType,
5780  m_FreeSuballocationsBySize[index],
5781  false, // canMakeOtherLost
5782  &pAllocationRequest->offset,
5783  &pAllocationRequest->itemsToMakeLostCount,
5784  &pAllocationRequest->sumFreeSize,
5785  &pAllocationRequest->sumItemSize))
5786  {
5787  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5788  return true;
5789  }
5790  }
5791  }
5792  }
5793 
5794  if(canMakeOtherLost)
5795  {
5796  // Brute-force algorithm. TODO: Come up with something better.
5797 
5798  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5799  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5800 
5801  VmaAllocationRequest tmpAllocRequest = {};
5802  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5803  suballocIt != m_Suballocations.end();
5804  ++suballocIt)
5805  {
5806  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5807  suballocIt->hAllocation->CanBecomeLost())
5808  {
5809  if(CheckAllocation(
5810  currentFrameIndex,
5811  frameInUseCount,
5812  bufferImageGranularity,
5813  allocSize,
5814  allocAlignment,
5815  allocType,
5816  suballocIt,
5817  canMakeOtherLost,
5818  &tmpAllocRequest.offset,
5819  &tmpAllocRequest.itemsToMakeLostCount,
5820  &tmpAllocRequest.sumFreeSize,
5821  &tmpAllocRequest.sumItemSize))
5822  {
5823  tmpAllocRequest.item = suballocIt;
5824 
5825  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5826  {
5827  *pAllocationRequest = tmpAllocRequest;
5828  }
5829  }
5830  }
5831  }
5832 
5833  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5834  {
5835  return true;
5836  }
5837  }
5838 
5839  return false;
5840 }
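// CreateAllocationRequest() above relies on VmaBinaryFindFirstNotLess to locate,
// in O(log n), the first entry of m_FreeSuballocationsBySize whose size is not
// less than allocSize. A self-contained sketch of the equivalent lower-bound
// search over a raw array (illustrative, not the library's implementation):
#if 0
#include <cstddef>
#include <cstdint>

static const uint64_t* FindFirstNotLess(const uint64_t* beg, const uint64_t* end, uint64_t key)
{
    size_t count = size_t(end - beg);
    while(count > 0)
    {
        const size_t step = count / 2;
        const uint64_t* mid = beg + step;
        if(*mid < key)
        {
            beg = mid + 1;  // Discard the left half, including mid.
            count -= step + 1;
        }
        else
        {
            count = step;   // Keep mid as a candidate.
        }
    }
    return beg;             // First element >= key, or 'end' if none.
}
#endif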
5841 
5842 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5843  uint32_t currentFrameIndex,
5844  uint32_t frameInUseCount,
5845  VmaAllocationRequest* pAllocationRequest)
5846 {
5847  while(pAllocationRequest->itemsToMakeLostCount > 0)
5848  {
5849  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5850  {
5851  ++pAllocationRequest->item;
5852  }
5853  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5854  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5855  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5856  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5857  {
5858  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5859  --pAllocationRequest->itemsToMakeLostCount;
5860  }
5861  else
5862  {
5863  return false;
5864  }
5865  }
5866 
5867  VMA_HEAVY_ASSERT(Validate());
5868  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5869  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5870 
5871  return true;
5872 }
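// Whether an allocation may be sacrificed is decided purely from frame indices:
// it can be made lost only when it has not been used for more than frameInUseCount
// frames. A sketch of that predicate, matching the checks used throughout this
// file (parameter names are illustrative):
#if 0
#include <cstdint>

// Example: lastUseFrameIndex = 10, frameInUseCount = 2 -> reclaimable from frame 13 on.
static bool CanMakeLostNow(uint32_t lastUseFrameIndex,
                           uint32_t frameInUseCount,
                           uint32_t currentFrameIndex)
{
    return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
}
#endif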
5873 
5874 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5875 {
5876  uint32_t lostAllocationCount = 0;
5877  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5878  it != m_Suballocations.end();
5879  ++it)
5880  {
5881  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5882  it->hAllocation->CanBecomeLost() &&
5883  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5884  {
5885  it = FreeSuballocation(it);
5886  ++lostAllocationCount;
5887  }
5888  }
5889  return lostAllocationCount;
5890 }
5891 
5892 void VmaBlockMetadata::Alloc(
5893  const VmaAllocationRequest& request,
5894  VmaSuballocationType type,
5895  VkDeviceSize allocSize,
5896  VmaAllocation hAllocation)
5897 {
5898  VMA_ASSERT(request.item != m_Suballocations.end());
5899  VmaSuballocation& suballoc = *request.item;
5900  // Given suballocation is a free block.
5901  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5902  // Given offset is inside this suballocation.
5903  VMA_ASSERT(request.offset >= suballoc.offset);
5904  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5905  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5906  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5907 
5908  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5909  // it to become used.
5910  UnregisterFreeSuballocation(request.item);
5911 
5912  suballoc.offset = request.offset;
5913  suballoc.size = allocSize;
5914  suballoc.type = type;
5915  suballoc.hAllocation = hAllocation;
5916 
5917  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5918  if(paddingEnd)
5919  {
5920  VmaSuballocation paddingSuballoc = {};
5921  paddingSuballoc.offset = request.offset + allocSize;
5922  paddingSuballoc.size = paddingEnd;
5923  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5924  VmaSuballocationList::iterator next = request.item;
5925  ++next;
5926  const VmaSuballocationList::iterator paddingEndItem =
5927  m_Suballocations.insert(next, paddingSuballoc);
5928  RegisterFreeSuballocation(paddingEndItem);
5929  }
5930 
5931  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5932  if(paddingBegin)
5933  {
5934  VmaSuballocation paddingSuballoc = {};
5935  paddingSuballoc.offset = request.offset - paddingBegin;
5936  paddingSuballoc.size = paddingBegin;
5937  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5938  const VmaSuballocationList::iterator paddingBeginItem =
5939  m_Suballocations.insert(request.item, paddingSuballoc);
5940  RegisterFreeSuballocation(paddingBeginItem);
5941  }
5942 
5943  // Update totals.
5944  m_FreeCount = m_FreeCount - 1;
5945  if(paddingBegin > 0)
5946  {
5947  ++m_FreeCount;
5948  }
5949  if(paddingEnd > 0)
5950  {
5951  ++m_FreeCount;
5952  }
5953  m_SumFreeSize -= allocSize;
5954 }
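// Alloc() above splits one free suballocation into up to three pieces: free
// padding before (created by alignment and debug margin), the allocation itself,
// and free padding after. A standalone sketch of that arithmetic (illustrative
// names, not part of the library):
#if 0
#include <cassert>
#include <cstdint>

static void SplitFreeRegion(uint64_t freeOffset, uint64_t freeSize,
                            uint64_t allocOffset, uint64_t allocSize,
                            uint64_t* outPaddingBegin, uint64_t* outPaddingEnd)
{
    assert(allocOffset >= freeOffset);
    const uint64_t paddingBegin = allocOffset - freeOffset;
    assert(freeSize >= paddingBegin + allocSize);
    *outPaddingBegin = paddingBegin;                      // New free suballocation before, if > 0.
    *outPaddingEnd = freeSize - paddingBegin - allocSize; // New free suballocation after, if > 0.
}
#endif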
5955 
5956 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5957 {
5958  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5959  suballocItem != m_Suballocations.end();
5960  ++suballocItem)
5961  {
5962  VmaSuballocation& suballoc = *suballocItem;
5963  if(suballoc.hAllocation == allocation)
5964  {
5965  FreeSuballocation(suballocItem);
5966  VMA_HEAVY_ASSERT(Validate());
5967  return;
5968  }
5969  }
5970  VMA_ASSERT(0 && "Not found!");
5971 }
5972 
5973 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5974 {
5975  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5976  suballocItem != m_Suballocations.end();
5977  ++suballocItem)
5978  {
5979  VmaSuballocation& suballoc = *suballocItem;
5980  if(suballoc.offset == offset)
5981  {
5982  FreeSuballocation(suballocItem);
5983  return;
5984  }
5985  }
5986  VMA_ASSERT(0 && "Not found!");
5987 }
5988 
5989 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5990 {
5991  VkDeviceSize lastSize = 0;
5992  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5993  {
5994  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5995 
5996  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5997  {
5998  VMA_ASSERT(0);
5999  return false;
6000  }
6001  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6002  {
6003  VMA_ASSERT(0);
6004  return false;
6005  }
6006  if(it->size < lastSize)
6007  {
6008  VMA_ASSERT(0);
6009  return false;
6010  }
6011 
6012  lastSize = it->size;
6013  }
6014  return true;
6015 }
6016 
6017 bool VmaBlockMetadata::CheckAllocation(
6018  uint32_t currentFrameIndex,
6019  uint32_t frameInUseCount,
6020  VkDeviceSize bufferImageGranularity,
6021  VkDeviceSize allocSize,
6022  VkDeviceSize allocAlignment,
6023  VmaSuballocationType allocType,
6024  VmaSuballocationList::const_iterator suballocItem,
6025  bool canMakeOtherLost,
6026  VkDeviceSize* pOffset,
6027  size_t* itemsToMakeLostCount,
6028  VkDeviceSize* pSumFreeSize,
6029  VkDeviceSize* pSumItemSize) const
6030 {
6031  VMA_ASSERT(allocSize > 0);
6032  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6033  VMA_ASSERT(suballocItem != m_Suballocations.cend());
6034  VMA_ASSERT(pOffset != VMA_NULL);
6035 
6036  *itemsToMakeLostCount = 0;
6037  *pSumFreeSize = 0;
6038  *pSumItemSize = 0;
6039 
6040  if(canMakeOtherLost)
6041  {
6042  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6043  {
6044  *pSumFreeSize = suballocItem->size;
6045  }
6046  else
6047  {
6048  if(suballocItem->hAllocation->CanBecomeLost() &&
6049  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6050  {
6051  ++*itemsToMakeLostCount;
6052  *pSumItemSize = suballocItem->size;
6053  }
6054  else
6055  {
6056  return false;
6057  }
6058  }
6059 
6060  // Remaining size is too small for this request: Early return.
6061  if(m_Size - suballocItem->offset < allocSize)
6062  {
6063  return false;
6064  }
6065 
6066  // Start from offset equal to beginning of this suballocation.
6067  *pOffset = suballocItem->offset;
6068 
6069  // Apply VMA_DEBUG_MARGIN at the beginning.
6070  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
6071  {
6072  *pOffset += VMA_DEBUG_MARGIN;
6073  }
6074 
6075  // Apply alignment.
6076  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
6077 
6078  // Check previous suballocations for BufferImageGranularity conflicts.
6079  // Make bigger alignment if necessary.
6080  if(bufferImageGranularity > 1)
6081  {
6082  bool bufferImageGranularityConflict = false;
6083  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6084  while(prevSuballocItem != m_Suballocations.cbegin())
6085  {
6086  --prevSuballocItem;
6087  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6088  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6089  {
6090  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6091  {
6092  bufferImageGranularityConflict = true;
6093  break;
6094  }
6095  }
6096  else
6097  // Already on previous page.
6098  break;
6099  }
6100  if(bufferImageGranularityConflict)
6101  {
6102  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6103  }
6104  }
6105 
6106  // Now that we have final *pOffset, check if we are past suballocItem.
6107  // If yes, return false - this function should be called for another suballocItem as starting point.
6108  if(*pOffset >= suballocItem->offset + suballocItem->size)
6109  {
6110  return false;
6111  }
6112 
6113  // Calculate padding at the beginning based on current offset.
6114  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
6115 
6116  // Calculate required margin at the end if this is not last suballocation.
6117  VmaSuballocationList::const_iterator next = suballocItem;
6118  ++next;
6119  const VkDeviceSize requiredEndMargin =
6120  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
6121 
6122  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
6123  // Another early return check.
6124  if(suballocItem->offset + totalSize > m_Size)
6125  {
6126  return false;
6127  }
6128 
6129  // Advance lastSuballocItem until desired size is reached.
6130  // Update itemsToMakeLostCount.
6131  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
6132  if(totalSize > suballocItem->size)
6133  {
6134  VkDeviceSize remainingSize = totalSize - suballocItem->size;
6135  while(remainingSize > 0)
6136  {
6137  ++lastSuballocItem;
6138  if(lastSuballocItem == m_Suballocations.cend())
6139  {
6140  return false;
6141  }
6142  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6143  {
6144  *pSumFreeSize += lastSuballocItem->size;
6145  }
6146  else
6147  {
6148  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
6149  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
6150  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6151  {
6152  ++*itemsToMakeLostCount;
6153  *pSumItemSize += lastSuballocItem->size;
6154  }
6155  else
6156  {
6157  return false;
6158  }
6159  }
6160  remainingSize = (lastSuballocItem->size < remainingSize) ?
6161  remainingSize - lastSuballocItem->size : 0;
6162  }
6163  }
6164 
6165  // Check next suballocations for BufferImageGranularity conflicts.
6166  // If conflict exists, we must mark more allocations lost or fail.
6167  if(bufferImageGranularity > 1)
6168  {
6169  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
6170  ++nextSuballocItem;
6171  while(nextSuballocItem != m_Suballocations.cend())
6172  {
6173  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6174  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6175  {
6176  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6177  {
6178  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
6179  if(nextSuballoc.hAllocation->CanBecomeLost() &&
6180  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6181  {
6182  ++*itemsToMakeLostCount;
6183  }
6184  else
6185  {
6186  return false;
6187  }
6188  }
6189  }
6190  else
6191  {
6192  // Already on next page.
6193  break;
6194  }
6195  ++nextSuballocItem;
6196  }
6197  }
6198  }
6199  else
6200  {
6201  const VmaSuballocation& suballoc = *suballocItem;
6202  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
6203 
6204  *pSumFreeSize = suballoc.size;
6205 
6206  // Size of this suballocation is too small for this request: Early return.
6207  if(suballoc.size < allocSize)
6208  {
6209  return false;
6210  }
6211 
6212  // Start from offset equal to beginning of this suballocation.
6213  *pOffset = suballoc.offset;
6214 
6215  // Apply VMA_DEBUG_MARGIN at the beginning.
6216  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
6217  {
6218  *pOffset += VMA_DEBUG_MARGIN;
6219  }
6220 
6221  // Apply alignment.
6222  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
6223 
6224  // Check previous suballocations for BufferImageGranularity conflicts.
6225  // Make bigger alignment if necessary.
6226  if(bufferImageGranularity > 1)
6227  {
6228  bool bufferImageGranularityConflict = false;
6229  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6230  while(prevSuballocItem != m_Suballocations.cbegin())
6231  {
6232  --prevSuballocItem;
6233  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6234  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6235  {
6236  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6237  {
6238  bufferImageGranularityConflict = true;
6239  break;
6240  }
6241  }
6242  else
6243  // Already on previous page.
6244  break;
6245  }
6246  if(bufferImageGranularityConflict)
6247  {
6248  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6249  }
6250  }
6251 
6252  // Calculate padding at the beginning based on current offset.
6253  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
6254 
6255  // Calculate required margin at the end if this is not last suballocation.
6256  VmaSuballocationList::const_iterator next = suballocItem;
6257  ++next;
6258  const VkDeviceSize requiredEndMargin =
6259  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
6260 
6261  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
6262  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
6263  {
6264  return false;
6265  }
6266 
6267  // Check next suballocations for BufferImageGranularity conflicts.
6268  // If conflict exists, allocation cannot be made here.
6269  if(bufferImageGranularity > 1)
6270  {
6271  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
6272  ++nextSuballocItem;
6273  while(nextSuballocItem != m_Suballocations.cend())
6274  {
6275  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6276  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6277  {
6278  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6279  {
6280  return false;
6281  }
6282  }
6283  else
6284  {
6285  // Already on next page.
6286  break;
6287  }
6288  ++nextSuballocItem;
6289  }
6290  }
6291  }
6292 
6293  // All tests passed: Success. pOffset is already filled.
6294  return true;
6295 }
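// The granularity tests in CheckAllocation() hinge on whether the end of one
// resource and the start of the next fall on the same "page" of size
// bufferImageGranularity. A sketch of such a page test, assuming a power-of-two
// page size (the library's own VmaBlocksOnSamePage is defined elsewhere in this file):
#if 0
#include <cassert>
#include <cstdint>

static bool OnSamePage(uint64_t aOffset, uint64_t aSize, uint64_t bOffset, uint64_t pageSize)
{
    assert(pageSize > 0 && (pageSize & (pageSize - 1)) == 0 && "pageSize must be a power of 2.");
    const uint64_t aEndPage   = (aOffset + aSize - 1) & ~(pageSize - 1); // Page of A's last byte.
    const uint64_t bStartPage = bOffset & ~(pageSize - 1);               // Page of B's first byte.
    return aEndPage == bStartPage;
}
#endif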
6296 
6297 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
6298 {
6299  VMA_ASSERT(item != m_Suballocations.end());
6300  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6301 
6302  VmaSuballocationList::iterator nextItem = item;
6303  ++nextItem;
6304  VMA_ASSERT(nextItem != m_Suballocations.end());
6305  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6306 
6307  item->size += nextItem->size;
6308  --m_FreeCount;
6309  m_Suballocations.erase(nextItem);
6310 }
6311 
6312 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
6313 {
6314  // Change this suballocation to be marked as free.
6315  VmaSuballocation& suballoc = *suballocItem;
6316  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6317  suballoc.hAllocation = VK_NULL_HANDLE;
6318 
6319  // Update totals.
6320  ++m_FreeCount;
6321  m_SumFreeSize += suballoc.size;
6322 
6323  // Merge with previous and/or next suballocation if it's also free.
6324  bool mergeWithNext = false;
6325  bool mergeWithPrev = false;
6326 
6327  VmaSuballocationList::iterator nextItem = suballocItem;
6328  ++nextItem;
6329  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
6330  {
6331  mergeWithNext = true;
6332  }
6333 
6334  VmaSuballocationList::iterator prevItem = suballocItem;
6335  if(suballocItem != m_Suballocations.begin())
6336  {
6337  --prevItem;
6338  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6339  {
6340  mergeWithPrev = true;
6341  }
6342  }
6343 
6344  if(mergeWithNext)
6345  {
6346  UnregisterFreeSuballocation(nextItem);
6347  MergeFreeWithNext(suballocItem);
6348  }
6349 
6350  if(mergeWithPrev)
6351  {
6352  UnregisterFreeSuballocation(prevItem);
6353  MergeFreeWithNext(prevItem);
6354  RegisterFreeSuballocation(prevItem);
6355  return prevItem;
6356  }
6357  else
6358  {
6359  RegisterFreeSuballocation(suballocItem);
6360  return suballocItem;
6361  }
6362 }
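// FreeSuballocation() above coalesces the freed region with free neighbors so the
// suballocation list never contains two consecutive free entries. A reduced sketch
// of the same merge logic on a generic doubly-linked list (illustrative types):
#if 0
#include <cstdint>
#include <iterator>
#include <list>

struct Region { uint64_t offset; uint64_t size; bool free; };

static std::list<Region>::iterator CoalesceFree(std::list<Region>& regions,
                                                std::list<Region>::iterator item)
{
    item->free = true;
    std::list<Region>::iterator next = std::next(item);
    if(next != regions.end() && next->free)
    {
        item->size += next->size;      // Absorb the following free region.
        regions.erase(next);
    }
    if(item != regions.begin())
    {
        std::list<Region>::iterator prev = std::prev(item);
        if(prev->free)
        {
            prev->size += item->size;  // Absorb 'item' into the preceding free region.
            regions.erase(item);
            return prev;
        }
    }
    return item;
}
#endif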
6363 
6364 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6365 {
6366  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6367  VMA_ASSERT(item->size > 0);
6368 
6369  // You may want to enable this validation at the beginning or at the end of
6370  // this function, depending on what you want to check.
6371  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6372 
6373  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6374  {
6375  if(m_FreeSuballocationsBySize.empty())
6376  {
6377  m_FreeSuballocationsBySize.push_back(item);
6378  }
6379  else
6380  {
6381  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6382  }
6383  }
6384 
6385  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6386 }
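// RegisterFreeSuballocation() keeps m_FreeSuballocationsBySize ordered by size so
// that the binary searches above stay valid; VmaVectorInsertSorted behaves like
// upper_bound followed by insert. A minimal sketch, with the element type
// simplified to a plain size (illustrative, not the library's code):
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>

static void InsertSorted(std::vector<uint64_t>& sorted, uint64_t value)
{
    // upper_bound places equal sizes after existing ones, keeping insertion stable.
    sorted.insert(std::upper_bound(sorted.begin(), sorted.end(), value), value);
}
#endif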
6387 
6388 
6389 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6390 {
6391  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6392  VMA_ASSERT(item->size > 0);
6393 
6394  // You may want to enable this validation at the beginning or at the end of
6395  // this function, depending on what you want to check.
6396  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6397 
6398  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6399  {
6400  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6401  m_FreeSuballocationsBySize.data(),
6402  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6403  item,
6404  VmaSuballocationItemSizeLess());
6405  for(size_t index = it - m_FreeSuballocationsBySize.data();
6406  index < m_FreeSuballocationsBySize.size();
6407  ++index)
6408  {
6409  if(m_FreeSuballocationsBySize[index] == item)
6410  {
6411  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6412  return;
6413  }
6414  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6415  }
6416  VMA_ASSERT(0 && "Not found.");
6417  }
6418 
6419  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6420 }
6421 
6422 ////////////////////////////////////////////////////////////////////////////////
6423 // class VmaDeviceMemoryBlock
6424 
6425 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6426  m_Metadata(hAllocator),
6427  m_MemoryTypeIndex(UINT32_MAX),
6428  m_Id(0),
6429  m_hMemory(VK_NULL_HANDLE),
6430  m_MapCount(0),
6431  m_pMappedData(VMA_NULL)
6432 {
6433 }
6434 
6435 void VmaDeviceMemoryBlock::Init(
6436  uint32_t newMemoryTypeIndex,
6437  VkDeviceMemory newMemory,
6438  VkDeviceSize newSize,
6439  uint32_t id)
6440 {
6441  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6442 
6443  m_MemoryTypeIndex = newMemoryTypeIndex;
6444  m_Id = id;
6445  m_hMemory = newMemory;
6446 
6447  m_Metadata.Init(newSize);
6448 }
6449 
6450 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6451 {
6452  // This is the most important assert in the entire library.
6453  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6454  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6455 
6456  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6457  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6458  m_hMemory = VK_NULL_HANDLE;
6459 }
6460 
6461 bool VmaDeviceMemoryBlock::Validate() const
6462 {
6463  if((m_hMemory == VK_NULL_HANDLE) ||
6464  (m_Metadata.GetSize() == 0))
6465  {
6466  return false;
6467  }
6468 
6469  return m_Metadata.Validate();
6470 }
6471 
6472 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6473 {
6474  if(count == 0)
6475  {
6476  return VK_SUCCESS;
6477  }
6478 
6479  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6480  if(m_MapCount != 0)
6481  {
6482  m_MapCount += count;
6483  VMA_ASSERT(m_pMappedData != VMA_NULL);
6484  if(ppData != VMA_NULL)
6485  {
6486  *ppData = m_pMappedData;
6487  }
6488  return VK_SUCCESS;
6489  }
6490  else
6491  {
6492  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6493  hAllocator->m_hDevice,
6494  m_hMemory,
6495  0, // offset
6496  VK_WHOLE_SIZE,
6497  0, // flags
6498  &m_pMappedData);
6499  if(result == VK_SUCCESS)
6500  {
6501  if(ppData != VMA_NULL)
6502  {
6503  *ppData = m_pMappedData;
6504  }
6505  m_MapCount = count;
6506  }
6507  return result;
6508  }
6509 }
6510 
6511 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6512 {
6513  if(count == 0)
6514  {
6515  return;
6516  }
6517 
6518  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6519  if(m_MapCount >= count)
6520  {
6521  m_MapCount -= count;
6522  if(m_MapCount == 0)
6523  {
6524  m_pMappedData = VMA_NULL;
6525  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
6526  }
6527  }
6528  else
6529  {
6530  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6531  }
6532 }
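// Map()/Unmap() are reference-counted per block: only the first Map() calls
// vkMapMemory and only the last matching Unmap() calls vkUnmapMemory, so nested
// mappings of allocations that share a block are cheap. A hedged usage sketch via
// the public API; 'allocator', 'alloc', 'myData' and 'myDataSize' are hypothetical:
#if 0
#include <cstring>

static VkResult UploadToAllocation(VmaAllocator allocator, VmaAllocation alloc,
                                   const void* myData, size_t myDataSize)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData); // vkMapMemory only on the block's first map.
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pData, myData, myDataSize);
    vmaUnmapMemory(allocator, alloc); // vkUnmapMemory only when the map count drops to 0.
    return VK_SUCCESS;
}
#endif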
6533 
6534 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
6535  const VmaAllocator hAllocator,
6536  const VmaAllocation hAllocation,
6537  VkBuffer hBuffer)
6538 {
6539  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6540  hAllocation->GetBlock() == this);
6541  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6542  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6543  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
6544  hAllocator->m_hDevice,
6545  hBuffer,
6546  m_hMemory,
6547  hAllocation->GetOffset());
6548 }
6549 
6550 VkResult VmaDeviceMemoryBlock::BindImageMemory(
6551  const VmaAllocator hAllocator,
6552  const VmaAllocation hAllocation,
6553  VkImage hImage)
6554 {
6555  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6556  hAllocation->GetBlock() == this);
6557  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6558  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6559  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
6560  hAllocator->m_hDevice,
6561  hImage,
6562  m_hMemory,
6563  hAllocation->GetOffset());
6564 }
6565 
6566 static void InitStatInfo(VmaStatInfo& outInfo)
6567 {
6568  memset(&outInfo, 0, sizeof(outInfo));
6569  outInfo.allocationSizeMin = UINT64_MAX;
6570  outInfo.unusedRangeSizeMin = UINT64_MAX;
6571 }
6572 
6573 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6574 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6575 {
6576  inoutInfo.blockCount += srcInfo.blockCount;
6577  inoutInfo.allocationCount += srcInfo.allocationCount;
6578  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6579  inoutInfo.usedBytes += srcInfo.usedBytes;
6580  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6581  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6582  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6583  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6584  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6585 }
6586 
6587 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6588 {
6589  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6590  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6591  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6592  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6593 }
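// The averages above are computed with rounding integer division (VmaRoundDiv).
// A sketch of that rounding, assuming the conventional (x + y/2) / y formula:
#if 0
#include <cstdint>

// Example: RoundDiv(10, 4) == 3, whereas plain integer 10 / 4 == 2.
static uint64_t RoundDiv(uint64_t x, uint64_t y)
{
    return (x + y / 2) / y;
}
#endif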
6594 
6595 VmaPool_T::VmaPool_T(
6596  VmaAllocator hAllocator,
6597  const VmaPoolCreateInfo& createInfo) :
6598  m_BlockVector(
6599  hAllocator,
6600  createInfo.memoryTypeIndex,
6601  createInfo.blockSize,
6602  createInfo.minBlockCount,
6603  createInfo.maxBlockCount,
6604  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6605  createInfo.frameInUseCount,
6606  true), // isCustomPool
6607  m_Id(0)
6608 {
6609 }
6610 
6611 VmaPool_T::~VmaPool_T()
6612 {
6613 }
6614 
6615 #if VMA_STATS_STRING_ENABLED
6616 
6617 #endif // #if VMA_STATS_STRING_ENABLED
6618 
6619 VmaBlockVector::VmaBlockVector(
6620  VmaAllocator hAllocator,
6621  uint32_t memoryTypeIndex,
6622  VkDeviceSize preferredBlockSize,
6623  size_t minBlockCount,
6624  size_t maxBlockCount,
6625  VkDeviceSize bufferImageGranularity,
6626  uint32_t frameInUseCount,
6627  bool isCustomPool) :
6628  m_hAllocator(hAllocator),
6629  m_MemoryTypeIndex(memoryTypeIndex),
6630  m_PreferredBlockSize(preferredBlockSize),
6631  m_MinBlockCount(minBlockCount),
6632  m_MaxBlockCount(maxBlockCount),
6633  m_BufferImageGranularity(bufferImageGranularity),
6634  m_FrameInUseCount(frameInUseCount),
6635  m_IsCustomPool(isCustomPool),
6636  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6637  m_HasEmptyBlock(false),
6638  m_pDefragmentator(VMA_NULL),
6639  m_NextBlockId(0)
6640 {
6641 }
6642 
6643 VmaBlockVector::~VmaBlockVector()
6644 {
6645  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6646 
6647  for(size_t i = m_Blocks.size(); i--; )
6648  {
6649  m_Blocks[i]->Destroy(m_hAllocator);
6650  vma_delete(m_hAllocator, m_Blocks[i]);
6651  }
6652 }
6653 
6654 VkResult VmaBlockVector::CreateMinBlocks()
6655 {
6656  for(size_t i = 0; i < m_MinBlockCount; ++i)
6657  {
6658  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6659  if(res != VK_SUCCESS)
6660  {
6661  return res;
6662  }
6663  }
6664  return VK_SUCCESS;
6665 }
6666 
6667 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6668 {
6669  pStats->size = 0;
6670  pStats->unusedSize = 0;
6671  pStats->allocationCount = 0;
6672  pStats->unusedRangeCount = 0;
6673  pStats->unusedRangeSizeMax = 0;
6674 
6675  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6676 
6677  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6678  {
6679  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6680  VMA_ASSERT(pBlock);
6681  VMA_HEAVY_ASSERT(pBlock->Validate());
6682  pBlock->m_Metadata.AddPoolStats(*pStats);
6683  }
6684 }
6685 
6686 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6687 
6688 VkResult VmaBlockVector::Allocate(
6689  VmaPool hCurrentPool,
6690  uint32_t currentFrameIndex,
6691  VkDeviceSize size,
6692  VkDeviceSize alignment,
6693  const VmaAllocationCreateInfo& createInfo,
6694  VmaSuballocationType suballocType,
6695  VmaAllocation* pAllocation)
6696 {
6697  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6698  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6699 
6700  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6701 
6702  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6703  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6704  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6705  {
6706  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6707  VMA_ASSERT(pCurrBlock);
6708  VmaAllocationRequest currRequest = {};
6709  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6710  currentFrameIndex,
6711  m_FrameInUseCount,
6712  m_BufferImageGranularity,
6713  size,
6714  alignment,
6715  suballocType,
6716  false, // canMakeOtherLost
6717  &currRequest))
6718  {
6719  // Allocate from pCurrBlock.
6720  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6721 
6722  if(mapped)
6723  {
6724  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6725  if(res != VK_SUCCESS)
6726  {
6727  return res;
6728  }
6729  }
6730 
6731  // We no longer have an empty block.
6732  if(pCurrBlock->m_Metadata.IsEmpty())
6733  {
6734  m_HasEmptyBlock = false;
6735  }
6736 
6737  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6738  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, size, *pAllocation);
6739  (*pAllocation)->InitBlockAllocation(
6740  hCurrentPool,
6741  pCurrBlock,
6742  currRequest.offset,
6743  alignment,
6744  size,
6745  suballocType,
6746  mapped,
6747  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6748  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6749  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6750  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6751  return VK_SUCCESS;
6752  }
6753  }
6754 
6755  const bool canCreateNewBlock =
6756  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6757  (m_Blocks.size() < m_MaxBlockCount);
6758 
6759  // 2. Try to create new block.
6760  if(canCreateNewBlock)
6761  {
6762  // Calculate optimal size for new block.
6763  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6764  uint32_t newBlockSizeShift = 0;
6765  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6766 
6767  // Allocating blocks of other sizes is allowed only in default pools.
6768  // In custom pools block size is fixed.
6769  if(m_IsCustomPool == false)
6770  {
6771  // Allocate 1/8, 1/4, 1/2 as first blocks.
6772  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6773  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6774  {
6775  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6776  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
6777  {
6778  newBlockSize = smallerNewBlockSize;
6779  ++newBlockSizeShift;
6780  }
6781  else
6782  {
6783  break;
6784  }
6785  }
6786  }
6787 
6788  size_t newBlockIndex = 0;
6789  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6790  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6791  if(m_IsCustomPool == false)
6792  {
6793  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6794  {
6795  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6796  if(smallerNewBlockSize >= size)
6797  {
6798  newBlockSize = smallerNewBlockSize;
6799  ++newBlockSizeShift;
6800  res = CreateBlock(newBlockSize, &newBlockIndex);
6801  }
6802  else
6803  {
6804  break;
6805  }
6806  }
6807  }
6808 
6809  if(res == VK_SUCCESS)
6810  {
6811  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6812  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= size);
6813 
6814  if(mapped)
6815  {
6816  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6817  if(res != VK_SUCCESS)
6818  {
6819  return res;
6820  }
6821  }
6822 
6823  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6824  VmaAllocationRequest allocRequest;
6825  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6826  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6827  pBlock->m_Metadata.Alloc(allocRequest, suballocType, size, *pAllocation);
6828  (*pAllocation)->InitBlockAllocation(
6829  hCurrentPool,
6830  pBlock,
6831  allocRequest.offset,
6832  alignment,
6833  size,
6834  suballocType,
6835  mapped,
6836  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6837  VMA_HEAVY_ASSERT(pBlock->Validate());
6838  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
6839  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6840  return VK_SUCCESS;
6841  }
6842  }
6843 
6844  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6845 
6846  // 3. Try to allocate from existing blocks with making other allocations lost.
6847  if(canMakeOtherLost)
6848  {
6849  uint32_t tryIndex = 0;
6850  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6851  {
6852  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6853  VmaAllocationRequest bestRequest = {};
6854  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6855 
6856  // 1. Search existing allocations.
6857  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6858  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6859  {
6860  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6861  VMA_ASSERT(pCurrBlock);
6862  VmaAllocationRequest currRequest = {};
6863  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6864  currentFrameIndex,
6865  m_FrameInUseCount,
6866  m_BufferImageGranularity,
6867  size,
6868  alignment,
6869  suballocType,
6870  canMakeOtherLost,
6871  &currRequest))
6872  {
6873  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6874  if(pBestRequestBlock == VMA_NULL ||
6875  currRequestCost < bestRequestCost)
6876  {
6877  pBestRequestBlock = pCurrBlock;
6878  bestRequest = currRequest;
6879  bestRequestCost = currRequestCost;
6880 
6881  if(bestRequestCost == 0)
6882  {
6883  break;
6884  }
6885  }
6886  }
6887  }
6888 
6889  if(pBestRequestBlock != VMA_NULL)
6890  {
6891  if(mapped)
6892  {
6893  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6894  if(res != VK_SUCCESS)
6895  {
6896  return res;
6897  }
6898  }
6899 
6900  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6901  currentFrameIndex,
6902  m_FrameInUseCount,
6903  &bestRequest))
6904  {
6905  // We no longer have an empty block.
6906  if(pBestRequestBlock->m_Metadata.IsEmpty())
6907  {
6908  m_HasEmptyBlock = false;
6909  }
6910  // Allocate from pBestRequestBlock.
6911  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6912  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, size, *pAllocation);
6913  (*pAllocation)->InitBlockAllocation(
6914  hCurrentPool,
6915  pBestRequestBlock,
6916  bestRequest.offset,
6917  alignment,
6918  size,
6919  suballocType,
6920  mapped,
6921  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6922  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6923  VMA_DEBUG_LOG(" Returned from existing block");
6924  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6925  return VK_SUCCESS;
6926  }
6927  // else: Some allocations must have been touched while we are here. Next try.
6928  }
6929  else
6930  {
6931  // Could not find place in any of the blocks - break outer loop.
6932  break;
6933  }
6934  }
6935  /* Maximum number of tries exceeded - a very unlikely event when many other
6936  threads are simultaneously touching allocations, making it impossible to make
6937  them lost at the same time as we try to allocate. */
6938  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6939  {
6940  return VK_ERROR_TOO_MANY_OBJECTS;
6941  }
6942  }
6943 
6944  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6945 }
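// For default pools, Allocate() above sizes the first blocks at 1/8, 1/4 and 1/2
// of the preferred block size while the pool is still small, and halves the size
// again (up to 3 times) when vkAllocateMemory fails. A standalone sketch of the
// initial-size heuristic (illustrative names, not the library's code):
#if 0
#include <cstdint>

static uint64_t InitialBlockSize(uint64_t preferredBlockSize,
                                 uint64_t maxExistingBlockSize,
                                 uint64_t allocSize)
{
    uint64_t blockSize = preferredBlockSize;
    for(uint32_t shift = 0; shift < 3; ++shift) // NEW_BLOCK_SIZE_SHIFT_MAX == 3
    {
        const uint64_t smaller = blockSize / 2;
        if(smaller > maxExistingBlockSize && smaller >= allocSize * 2)
        {
            blockSize = smaller; // e.g. 256 MiB -> 128 -> 64 -> 32 MiB
        }
        else
        {
            break;
        }
    }
    return blockSize;
}
#endif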
6946 
6947 void VmaBlockVector::Free(
6948  VmaAllocation hAllocation)
6949 {
6950  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6951 
6952  // Scope for lock.
6953  {
6954  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6955 
6956  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6957 
6958  if(hAllocation->IsPersistentMap())
6959  {
6960  pBlock->Unmap(m_hAllocator, 1);
6961  }
6962 
6963  pBlock->m_Metadata.Free(hAllocation);
6964  VMA_HEAVY_ASSERT(pBlock->Validate());
6965 
6966  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6967 
6968  // pBlock became empty after this deallocation.
6969  if(pBlock->m_Metadata.IsEmpty())
6970  {
6971  // We already have an empty block - we don't want two, so delete this one.
6972  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6973  {
6974  pBlockToDelete = pBlock;
6975  Remove(pBlock);
6976  }
6977  // We now have our first empty block.
6978  else
6979  {
6980  m_HasEmptyBlock = true;
6981  }
6982  }
6983  // pBlock didn't become empty, but we have another empty block - find and free that one.
6984  // (This is optional, just a heuristic.)
6985  else if(m_HasEmptyBlock)
6986  {
6987  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6988  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6989  {
6990  pBlockToDelete = pLastBlock;
6991  m_Blocks.pop_back();
6992  m_HasEmptyBlock = false;
6993  }
6994  }
6995 
6996  IncrementallySortBlocks();
6997  }
6998 
6999  // Destruction of an empty block. Deferred until this point, outside of the mutex
7000  // lock, for performance reasons.
7001  if(pBlockToDelete != VMA_NULL)
7002  {
7003  VMA_DEBUG_LOG(" Deleted empty block");
7004  pBlockToDelete->Destroy(m_hAllocator);
7005  vma_delete(m_hAllocator, pBlockToDelete);
7006  }
7007 }
7008 
7009 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
7010 {
7011  VkDeviceSize result = 0;
7012  for(size_t i = m_Blocks.size(); i--; )
7013  {
7014  result = VMA_MAX(result, m_Blocks[i]->m_Metadata.GetSize());
7015  if(result >= m_PreferredBlockSize)
7016  {
7017  break;
7018  }
7019  }
7020  return result;
7021 }
7022 
7023 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
7024 {
7025  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7026  {
7027  if(m_Blocks[blockIndex] == pBlock)
7028  {
7029  VmaVectorRemove(m_Blocks, blockIndex);
7030  return;
7031  }
7032  }
7033  VMA_ASSERT(0);
7034 }
7035 
7036 void VmaBlockVector::IncrementallySortBlocks()
7037 {
7038  // Bubble sort only until first swap.
7039  for(size_t i = 1; i < m_Blocks.size(); ++i)
7040  {
7041  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
7042  {
7043  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
7044  return;
7045  }
7046  }
7047 }
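// IncrementallySortBlocks() performs a single bubble-sort step per call, so the
// cost of keeping m_Blocks roughly sorted by free space is amortized across many
// Free() calls instead of paying for a full sort each time. A reduced sketch:
#if 0
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

static void IncrementallySort(std::vector<uint64_t>& v)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(v[i - 1] > v[i])
        {
            std::swap(v[i - 1], v[i]); // Fix the first out-of-order pair only.
            return;
        }
    }
}
#endif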
7048 
7049 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
7050 {
7051  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7052  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
7053  allocInfo.allocationSize = blockSize;
7054  VkDeviceMemory mem = VK_NULL_HANDLE;
7055  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
7056  if(res < 0)
7057  {
7058  return res;
7059  }
7060 
7061  // New VkDeviceMemory successfully created.
7062 
7063  // Create a new block object for it.
7064  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
7065  pBlock->Init(
7066  m_MemoryTypeIndex,
7067  mem,
7068  allocInfo.allocationSize,
7069  m_NextBlockId++);
7070 
7071  m_Blocks.push_back(pBlock);
7072  if(pNewBlockIndex != VMA_NULL)
7073  {
7074  *pNewBlockIndex = m_Blocks.size() - 1;
7075  }
7076 
7077  return VK_SUCCESS;
7078 }
7079 
7080 #if VMA_STATS_STRING_ENABLED
7081 
7082 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
7083 {
7084  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7085 
7086  json.BeginObject();
7087 
7088  if(m_IsCustomPool)
7089  {
7090  json.WriteString("MemoryTypeIndex");
7091  json.WriteNumber(m_MemoryTypeIndex);
7092 
7093  json.WriteString("BlockSize");
7094  json.WriteNumber(m_PreferredBlockSize);
7095 
7096  json.WriteString("BlockCount");
7097  json.BeginObject(true);
7098  if(m_MinBlockCount > 0)
7099  {
7100  json.WriteString("Min");
7101  json.WriteNumber((uint64_t)m_MinBlockCount);
7102  }
7103  if(m_MaxBlockCount < SIZE_MAX)
7104  {
7105  json.WriteString("Max");
7106  json.WriteNumber((uint64_t)m_MaxBlockCount);
7107  }
7108  json.WriteString("Cur");
7109  json.WriteNumber((uint64_t)m_Blocks.size());
7110  json.EndObject();
7111 
7112  if(m_FrameInUseCount > 0)
7113  {
7114  json.WriteString("FrameInUseCount");
7115  json.WriteNumber(m_FrameInUseCount);
7116  }
7117  }
7118  else
7119  {
7120  json.WriteString("PreferredBlockSize");
7121  json.WriteNumber(m_PreferredBlockSize);
7122  }
7123 
7124  json.WriteString("Blocks");
7125  json.BeginObject();
7126  for(size_t i = 0; i < m_Blocks.size(); ++i)
7127  {
7128  json.BeginString();
7129  json.ContinueString(m_Blocks[i]->GetId());
7130  json.EndString();
7131 
7132  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
7133  }
7134  json.EndObject();
7135 
7136  json.EndObject();
7137 }
7138 
7139 #endif // #if VMA_STATS_STRING_ENABLED
7140 
7141 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
7142  VmaAllocator hAllocator,
7143  uint32_t currentFrameIndex)
7144 {
7145  if(m_pDefragmentator == VMA_NULL)
7146  {
7147  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
7148  hAllocator,
7149  this,
7150  currentFrameIndex);
7151  }
7152 
7153  return m_pDefragmentator;
7154 }
7155 
7156 VkResult VmaBlockVector::Defragment(
7157  VmaDefragmentationStats* pDefragmentationStats,
7158  VkDeviceSize& maxBytesToMove,
7159  uint32_t& maxAllocationsToMove)
7160 {
7161  if(m_pDefragmentator == VMA_NULL)
7162  {
7163  return VK_SUCCESS;
7164  }
7165 
7166  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7167 
7168  // Defragment.
7169  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
7170 
7171  // Accumulate statistics.
7172  if(pDefragmentationStats != VMA_NULL)
7173  {
7174  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
7175  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
7176  pDefragmentationStats->bytesMoved += bytesMoved;
7177  pDefragmentationStats->allocationsMoved += allocationsMoved;
7178  VMA_ASSERT(bytesMoved <= maxBytesToMove);
7179  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
7180  maxBytesToMove -= bytesMoved;
7181  maxAllocationsToMove -= allocationsMoved;
7182  }
7183 
7184  // Free empty blocks.
7185  m_HasEmptyBlock = false;
7186  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
7187  {
7188  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
7189  if(pBlock->m_Metadata.IsEmpty())
7190  {
7191  if(m_Blocks.size() > m_MinBlockCount)
7192  {
7193  if(pDefragmentationStats != VMA_NULL)
7194  {
7195  ++pDefragmentationStats->deviceMemoryBlocksFreed;
7196  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
7197  }
7198 
7199  VmaVectorRemove(m_Blocks, blockIndex);
7200  pBlock->Destroy(m_hAllocator);
7201  vma_delete(m_hAllocator, pBlock);
7202  }
7203  else
7204  {
7205  m_HasEmptyBlock = true;
7206  }
7207  }
7208  }
7209 
7210  return result;
7211 }
7212 
7213 void VmaBlockVector::DestroyDefragmentator()
7214 {
7215  if(m_pDefragmentator != VMA_NULL)
7216  {
7217  vma_delete(m_hAllocator, m_pDefragmentator);
7218  m_pDefragmentator = VMA_NULL;
7219  }
7220 }
7221 
7222 void VmaBlockVector::MakePoolAllocationsLost(
7223  uint32_t currentFrameIndex,
7224  size_t* pLostAllocationCount)
7225 {
7226  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7227  size_t lostAllocationCount = 0;
7228  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7229  {
7230  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7231  VMA_ASSERT(pBlock);
7232  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
7233  }
7234  if(pLostAllocationCount != VMA_NULL)
7235  {
7236  *pLostAllocationCount = lostAllocationCount;
7237  }
7238 }
7239 
7240 void VmaBlockVector::AddStats(VmaStats* pStats)
7241 {
7242  const uint32_t memTypeIndex = m_MemoryTypeIndex;
7243  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
7244 
7245  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7246 
7247  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7248  {
7249  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7250  VMA_ASSERT(pBlock);
7251  VMA_HEAVY_ASSERT(pBlock->Validate());
7252  VmaStatInfo allocationStatInfo;
7253  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
7254  VmaAddStatInfo(pStats->total, allocationStatInfo);
7255  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7256  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7257  }
7258 }
7259 
7260 ////////////////////////////////////////////////////////////////////////////////
7261 // VmaDefragmentator members definition
7262 
7263 VmaDefragmentator::VmaDefragmentator(
7264  VmaAllocator hAllocator,
7265  VmaBlockVector* pBlockVector,
7266  uint32_t currentFrameIndex) :
7267  m_hAllocator(hAllocator),
7268  m_pBlockVector(pBlockVector),
7269  m_CurrentFrameIndex(currentFrameIndex),
7270  m_BytesMoved(0),
7271  m_AllocationsMoved(0),
7272  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
7273  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
7274 {
7275 }
7276 
7277 VmaDefragmentator::~VmaDefragmentator()
7278 {
7279  for(size_t i = m_Blocks.size(); i--; )
7280  {
7281  vma_delete(m_hAllocator, m_Blocks[i]);
7282  }
7283 }
7284 
7285 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
7286 {
7287  AllocationInfo allocInfo;
7288  allocInfo.m_hAllocation = hAlloc;
7289  allocInfo.m_pChanged = pChanged;
7290  m_Allocations.push_back(allocInfo);
7291 }
7292 
7293 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
7294 {
7295  // It has already been mapped for defragmentation.
7296  if(m_pMappedDataForDefragmentation)
7297  {
7298  *ppMappedData = m_pMappedDataForDefragmentation;
7299  return VK_SUCCESS;
7300  }
7301 
7302  // The block is already mapped (e.g. persistently).
7303  if(m_pBlock->GetMappedData())
7304  {
7305  *ppMappedData = m_pBlock->GetMappedData();
7306  return VK_SUCCESS;
7307  }
7308 
7309  // Map on first usage.
7310  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
7311  *ppMappedData = m_pMappedDataForDefragmentation;
7312  return res;
7313 }
7314 
7315 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
7316 {
7317  if(m_pMappedDataForDefragmentation != VMA_NULL)
7318  {
7319  m_pBlock->Unmap(hAllocator, 1);
7320  }
7321 }
7322 
7323 VkResult VmaDefragmentator::DefragmentRound(
7324  VkDeviceSize maxBytesToMove,
7325  uint32_t maxAllocationsToMove)
7326 {
7327  if(m_Blocks.empty())
7328  {
7329  return VK_SUCCESS;
7330  }
7331 
7332  size_t srcBlockIndex = m_Blocks.size() - 1;
7333  size_t srcAllocIndex = SIZE_MAX;
7334  for(;;)
7335  {
7336  // 1. Find next allocation to move.
7337  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
7338  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
7339  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
7340  {
7341  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
7342  {
7343  // Finished: no more allocations to process.
7344  if(srcBlockIndex == 0)
7345  {
7346  return VK_SUCCESS;
7347  }
7348  else
7349  {
7350  --srcBlockIndex;
7351  srcAllocIndex = SIZE_MAX;
7352  }
7353  }
7354  else
7355  {
7356  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
7357  }
7358  }
7359 
7360  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
7361  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
7362 
7363  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
7364  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
7365  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
7366  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
7367 
7368  // 2. Try to find new place for this allocation in preceding or current block.
7369  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
7370  {
7371  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
7372  VmaAllocationRequest dstAllocRequest;
7373  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
7374  m_CurrentFrameIndex,
7375  m_pBlockVector->GetFrameInUseCount(),
7376  m_pBlockVector->GetBufferImageGranularity(),
7377  size,
7378  alignment,
7379  suballocType,
7380  false, // canMakeOtherLost
7381  &dstAllocRequest) &&
7382  MoveMakesSense(
7383  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
7384  {
7385  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
7386 
7387  // Reached limit on number of allocations or bytes to move.
7388  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
7389  (m_BytesMoved + size > maxBytesToMove))
7390  {
7391  return VK_INCOMPLETE;
7392  }
7393 
7394  void* pDstMappedData = VMA_NULL;
7395  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
7396  if(res != VK_SUCCESS)
7397  {
7398  return res;
7399  }
7400 
7401  void* pSrcMappedData = VMA_NULL;
7402  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7403  if(res != VK_SUCCESS)
7404  {
7405  return res;
7406  }
7407 
7408  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7409  memcpy(
7410  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7411  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7412  static_cast<size_t>(size));
7413 
7414  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7415  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7416 
7417  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7418 
7419  if(allocInfo.m_pChanged != VMA_NULL)
7420  {
7421  *allocInfo.m_pChanged = VK_TRUE;
7422  }
7423 
7424  ++m_AllocationsMoved;
7425  m_BytesMoved += size;
7426 
7427  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7428 
7429  break;
7430  }
7431  }
7432 
7433  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7434 
7435  if(srcAllocIndex > 0)
7436  {
7437  --srcAllocIndex;
7438  }
7439  else
7440  {
7441  if(srcBlockIndex > 0)
7442  {
7443  --srcBlockIndex;
7444  srcAllocIndex = SIZE_MAX;
7445  }
7446  else
7447  {
7448  return VK_SUCCESS;
7449  }
7450  }
7451  }
7452 }
7453 
7454 VkResult VmaDefragmentator::Defragment(
7455  VkDeviceSize maxBytesToMove,
7456  uint32_t maxAllocationsToMove)
7457 {
7458  if(m_Allocations.empty())
7459  {
7460  return VK_SUCCESS;
7461  }
7462 
7463  // Create block info for each block.
7464  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7465  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7466  {
7467  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7468  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7469  m_Blocks.push_back(pBlockInfo);
7470  }
7471 
7472  // Sort them by m_pBlock pointer value.
7473  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7474 
7475  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
7476  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7477  {
7478  AllocationInfo& allocInfo = m_Allocations[blockIndex];
7479  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
7480  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7481  {
7482  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7483  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7484  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7485  {
7486  (*it)->m_Allocations.push_back(allocInfo);
7487  }
7488  else
7489  {
7490  VMA_ASSERT(0);
7491  }
7492  }
7493  }
7494  m_Allocations.clear();
7495 
7496  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7497  {
7498  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7499  pBlockInfo->CalcHasNonMovableAllocations();
7500  pBlockInfo->SortAllocationsBySizeDescecnding();
7501  }
7502 
7503  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7504  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7505 
7506  // Execute defragmentation rounds (the main part).
7507  VkResult result = VK_SUCCESS;
7508  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7509  {
7510  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7511  }
7512 
7513  // Unmap blocks that were mapped for defragmentation.
7514  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7515  {
7516  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7517  }
7518 
7519  return result;
7520 }
7521 
7522 bool VmaDefragmentator::MoveMakesSense(
7523  size_t dstBlockIndex, VkDeviceSize dstOffset,
7524  size_t srcBlockIndex, VkDeviceSize srcOffset)
7525 {
7526  if(dstBlockIndex < srcBlockIndex)
7527  {
7528  return true;
7529  }
7530  if(dstBlockIndex > srcBlockIndex)
7531  {
7532  return false;
7533  }
7534  if(dstOffset < srcOffset)
7535  {
7536  return true;
7537  }
7538  return false;
7539 }
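// MoveMakesSense() is a lexicographic comparison on (blockIndex, offset): a move
// is worthwhile only if it transports data strictly toward the front of memory.
// An equivalent formulation using std::tie (illustrative, not the library's code):
#if 0
#include <cstddef>
#include <cstdint>
#include <tuple>

static bool MoveMakesSenseLex(size_t dstBlock, uint64_t dstOffset,
                              size_t srcBlock, uint64_t srcOffset)
{
    return std::tie(dstBlock, dstOffset) < std::tie(srcBlock, srcOffset);
}
#endif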
7540 
7541 ////////////////////////////////////////////////////////////////////////////////
7542 // VmaAllocator_T
7543 
7544 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7545  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7546  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7547  m_hDevice(pCreateInfo->device),
7548  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7549  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7550  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7551  m_PreferredLargeHeapBlockSize(0),
7552  m_PhysicalDevice(pCreateInfo->physicalDevice),
7553  m_CurrentFrameIndex(0),
7554  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
7555  m_NextPoolId(0)
7556 {
7557  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7558 
7559 #if !(VMA_DEDICATED_ALLOCATION)
7560  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
7561  {
7562  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
7563  }
7564 #endif
7565 
7566  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7567  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7568  memset(&m_MemProps, 0, sizeof(m_MemProps));
7569 
7570  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7571  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7572 
7573  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7574  {
7575  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7576  }
7577 
7578  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7579  {
7580  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7581  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7582  }
7583 
7584  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7585 
7586  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7587  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7588 
7589  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7590  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7591 
7592  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7593  {
7594  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7595  {
7596  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7597  if(limit != VK_WHOLE_SIZE)
7598  {
7599  m_HeapSizeLimit[heapIndex] = limit;
7600  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7601  {
7602  m_MemProps.memoryHeaps[heapIndex].size = limit;
7603  }
7604  }
7605  }
7606  }
7607 
7608  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7609  {
7610  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7611 
7612  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7613  this,
7614  memTypeIndex,
7615  preferredBlockSize,
7616  0,
7617  SIZE_MAX,
7618  GetBufferImageGranularity(),
7619  pCreateInfo->frameInUseCount,
7620  false); // isCustomPool
7621  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7622  // because minBlockCount is 0.
7623  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7624 
7625  }
7626 }
7627 
7628 VmaAllocator_T::~VmaAllocator_T()
7629 {
7630  VMA_ASSERT(m_Pools.empty());
7631 
7632  for(size_t i = GetMemoryTypeCount(); i--; )
7633  {
7634  vma_delete(this, m_pDedicatedAllocations[i]);
7635  vma_delete(this, m_pBlockVectors[i]);
7636  }
7637 }
7638 
7639 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7640 {
7641 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7642  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7643  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7644  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7645  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7646  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7647  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7648  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
7649  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
7650  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7651  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7652  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7653  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7654  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7655  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7656  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7657  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7658 #if VMA_DEDICATED_ALLOCATION
7659  if(m_UseKhrDedicatedAllocation)
7660  {
7661  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7662  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7663  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7664  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7665  }
7666 #endif // #if VMA_DEDICATED_ALLOCATION
7667 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7668 
7669 #define VMA_COPY_IF_NOT_NULL(funcName) \
7670  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7671 
7672  if(pVulkanFunctions != VMA_NULL)
7673  {
7674  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7675  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7676  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7677  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7678  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7679  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7680  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
7681  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
7682  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7683  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7684  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7685  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7686  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7687  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7688  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7689  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7690 #if VMA_DEDICATED_ALLOCATION
7691  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7692  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7693 #endif
7694  }
7695 
7696 #undef VMA_COPY_IF_NOT_NULL
7697 
7698  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7699  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7700  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7701  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7702  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7703  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7704  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7705  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7706  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
7707  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
7708  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7709  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7710  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7711  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7712  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7713  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7714  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7715  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7716 #if VMA_DEDICATED_ALLOCATION
7717  if(m_UseKhrDedicatedAllocation)
7718  {
7719  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7720  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7721  }
7722 #endif
7723 }
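// [Editorial example] A minimal sketch of satisfying the asserts above by passing
// explicit function pointers (useful when VMA_STATIC_VULKAN_FUNCTIONS is 0 and the
// application links Vulkan statically or has fetched these pointers itself).
// `physicalDevice` and `device` are placeholder handles owned by the application.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFreeMemory = &vkFreeMemory;
vulkanFunctions.vkMapMemory = &vkMapMemory;
vulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
vulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
vulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
vulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
vulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
vulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
vulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
vulkanFunctions.vkCreateImage = &vkCreateImage;
vulkanFunctions.vkDestroyImage = &vkDestroyImage;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/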
7724 
7725 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7726 {
7727  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7728  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7729  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7730  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7731 }
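// [Editorial example] Worked numbers, assuming this file's defaults at the time of
// writing (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
// a 256 MiB heap counts as "small", so its preferred block size is 256 MiB / 8 = 32 MiB,
// while an 8 GiB heap uses the full 256 MiB preferred large-heap block size.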
7732 
7733 VkResult VmaAllocator_T::AllocateMemoryOfType(
7734  VkDeviceSize size,
7735  VkDeviceSize alignment,
7736  bool dedicatedAllocation,
7737  VkBuffer dedicatedBuffer,
7738  VkImage dedicatedImage,
7739  const VmaAllocationCreateInfo& createInfo,
7740  uint32_t memTypeIndex,
7741  VmaSuballocationType suballocType,
7742  VmaAllocation* pAllocation)
7743 {
7744  VMA_ASSERT(pAllocation != VMA_NULL);
7745  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
7746 
7747  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7748 
7749  // If memory type is not HOST_VISIBLE, disable MAPPED.
7750  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7751  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7752  {
7753  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7754  }
7755 
7756  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7757  VMA_ASSERT(blockVector);
7758 
7759  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7760  bool preferDedicatedMemory =
7761  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7762  dedicatedAllocation ||
7763  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
7764  size > preferredBlockSize / 2;
7765 
7766  if(preferDedicatedMemory &&
7767  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7768  finalCreateInfo.pool == VK_NULL_HANDLE)
7769  {
7770  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7771  }
7772 
7773  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7774  {
7775  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7776  {
7777  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7778  }
7779  else
7780  {
7781  return AllocateDedicatedMemory(
7782  size,
7783  suballocType,
7784  memTypeIndex,
7785  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7786  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7787  finalCreateInfo.pUserData,
7788  dedicatedBuffer,
7789  dedicatedImage,
7790  pAllocation);
7791  }
7792  }
7793  else
7794  {
7795  VkResult res = blockVector->Allocate(
7796  VK_NULL_HANDLE, // hCurrentPool
7797  m_CurrentFrameIndex.load(),
7798  size,
7799  alignment,
7800  finalCreateInfo,
7801  suballocType,
7802  pAllocation);
7803  if(res == VK_SUCCESS)
7804  {
7805  return res;
7806  }
7807 
7808  // Block allocation failed. Try dedicated memory as a fallback.
7809  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7810  {
7811  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7812  }
7813  else
7814  {
7815  res = AllocateDedicatedMemory(
7816  size,
7817  suballocType,
7818  memTypeIndex,
7819  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7820  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7821  finalCreateInfo.pUserData,
7822  dedicatedBuffer,
7823  dedicatedImage,
7824  pAllocation);
7825  if(res == VK_SUCCESS)
7826  {
7827  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
7828  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7829  return VK_SUCCESS;
7830  }
7831  else
7832  {
7833  // Everything failed: Return error code.
7834  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7835  return res;
7836  }
7837  }
7838  }
7839 }
7840 
7841 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7842  VkDeviceSize size,
7843  VmaSuballocationType suballocType,
7844  uint32_t memTypeIndex,
7845  bool map,
7846  bool isUserDataString,
7847  void* pUserData,
7848  VkBuffer dedicatedBuffer,
7849  VkImage dedicatedImage,
7850  VmaAllocation* pAllocation)
7851 {
7852  VMA_ASSERT(pAllocation);
7853 
7854  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7855  allocInfo.memoryTypeIndex = memTypeIndex;
7856  allocInfo.allocationSize = size;
7857 
7858 #if VMA_DEDICATED_ALLOCATION
7859  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7860  if(m_UseKhrDedicatedAllocation)
7861  {
7862  if(dedicatedBuffer != VK_NULL_HANDLE)
7863  {
7864  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7865  dedicatedAllocInfo.buffer = dedicatedBuffer;
7866  allocInfo.pNext = &dedicatedAllocInfo;
7867  }
7868  else if(dedicatedImage != VK_NULL_HANDLE)
7869  {
7870  dedicatedAllocInfo.image = dedicatedImage;
7871  allocInfo.pNext = &dedicatedAllocInfo;
7872  }
7873  }
7874 #endif // #if VMA_DEDICATED_ALLOCATION
7875 
7876  // Allocate VkDeviceMemory.
7877  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7878  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7879  if(res < 0)
7880  {
7881  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7882  return res;
7883  }
7884 
7885  void* pMappedData = VMA_NULL;
7886  if(map)
7887  {
7888  res = (*m_VulkanFunctions.vkMapMemory)(
7889  m_hDevice,
7890  hMemory,
7891  0,
7892  VK_WHOLE_SIZE,
7893  0,
7894  &pMappedData);
7895  if(res < 0)
7896  {
7897  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7898  FreeVulkanMemory(memTypeIndex, size, hMemory);
7899  return res;
7900  }
7901  }
7902 
7903  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7904  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7905  (*pAllocation)->SetUserData(this, pUserData);
7906 
7907  // Register it in m_pDedicatedAllocations.
7908  {
7909  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7910  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7911  VMA_ASSERT(pDedicatedAllocations);
7912  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7913  }
7914 
7915  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7916 
7917  return VK_SUCCESS;
7918 }
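// [Editorial example] A sketch of how a user reaches this path: requesting a
// dedicated, persistently mapped allocation through the public API. `allocator` is
// assumed to be a previously created VmaAllocator.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buf, &alloc, &allocInfo);
// On success, allocInfo.pMappedData points into the dedicated VkDeviceMemory.
*/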
7919 
7920 void VmaAllocator_T::GetBufferMemoryRequirements(
7921  VkBuffer hBuffer,
7922  VkMemoryRequirements& memReq,
7923  bool& requiresDedicatedAllocation,
7924  bool& prefersDedicatedAllocation) const
7925 {
7926 #if VMA_DEDICATED_ALLOCATION
7927  if(m_UseKhrDedicatedAllocation)
7928  {
7929  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7930  memReqInfo.buffer = hBuffer;
7931 
7932  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7933 
7934  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7935  memReq2.pNext = &memDedicatedReq;
7936 
7937  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7938 
7939  memReq = memReq2.memoryRequirements;
7940  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7941  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7942  }
7943  else
7944 #endif // #if VMA_DEDICATED_ALLOCATION
7945  {
7946  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7947  requiresDedicatedAllocation = false;
7948  prefersDedicatedAllocation = false;
7949  }
7950 }
7951 
7952 void VmaAllocator_T::GetImageMemoryRequirements(
7953  VkImage hImage,
7954  VkMemoryRequirements& memReq,
7955  bool& requiresDedicatedAllocation,
7956  bool& prefersDedicatedAllocation) const
7957 {
7958 #if VMA_DEDICATED_ALLOCATION
7959  if(m_UseKhrDedicatedAllocation)
7960  {
7961  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7962  memReqInfo.image = hImage;
7963 
7964  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7965 
7966  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7967  memReq2.pNext = &memDedicatedReq;
7968 
7969  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7970 
7971  memReq = memReq2.memoryRequirements;
7972  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7973  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7974  }
7975  else
7976 #endif // #if VMA_DEDICATED_ALLOCATION
7977  {
7978  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7979  requiresDedicatedAllocation = false;
7980  prefersDedicatedAllocation = false;
7981  }
7982 }
7983 
7984 VkResult VmaAllocator_T::AllocateMemory(
7985  const VkMemoryRequirements& vkMemReq,
7986  bool requiresDedicatedAllocation,
7987  bool prefersDedicatedAllocation,
7988  VkBuffer dedicatedBuffer,
7989  VkImage dedicatedImage,
7990  const VmaAllocationCreateInfo& createInfo,
7991  VmaSuballocationType suballocType,
7992  VmaAllocation* pAllocation)
7993 {
7994  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7995  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7996  {
7997  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7998  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7999  }
8000  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
8001  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
8002  {
8003  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
8004  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8005  }
8006  if(requiresDedicatedAllocation)
8007  {
8008  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
8009  {
8010  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
8011  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8012  }
8013  if(createInfo.pool != VK_NULL_HANDLE)
8014  {
8015  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
8016  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8017  }
8018  }
8019  if((createInfo.pool != VK_NULL_HANDLE) &&
8020  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
8021  {
8022  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
8023  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8024  }
8025 
8026  if(createInfo.pool != VK_NULL_HANDLE)
8027  {
8028  const VkDeviceSize alignmentForPool = VMA_MAX(
8029  vkMemReq.alignment,
8030  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
8031  return createInfo.pool->m_BlockVector.Allocate(
8032  createInfo.pool,
8033  m_CurrentFrameIndex.load(),
8034  vkMemReq.size,
8035  alignmentForPool,
8036  createInfo,
8037  suballocType,
8038  pAllocation);
8039  }
8040  else
8041  {
8042  // Bit mask of Vulkan memory types acceptable for this allocation.
8043  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
8044  uint32_t memTypeIndex = UINT32_MAX;
8045  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
8046  if(res == VK_SUCCESS)
8047  {
8048  VkDeviceSize alignmentForMemType = VMA_MAX(
8049  vkMemReq.alignment,
8050  GetMemoryTypeMinAlignment(memTypeIndex));
8051 
8052  res = AllocateMemoryOfType(
8053  vkMemReq.size,
8054  alignmentForMemType,
8055  requiresDedicatedAllocation || prefersDedicatedAllocation,
8056  dedicatedBuffer,
8057  dedicatedImage,
8058  createInfo,
8059  memTypeIndex,
8060  suballocType,
8061  pAllocation);
8062  // Succeeded on first try.
8063  if(res == VK_SUCCESS)
8064  {
8065  return res;
8066  }
8067  // Allocation from this memory type failed. Try other compatible memory types.
8068  else
8069  {
8070  for(;;)
8071  {
8072  // Remove old memTypeIndex from list of possibilities.
8073  memoryTypeBits &= ~(1u << memTypeIndex);
8074  // Find alternative memTypeIndex.
8075  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
8076  if(res == VK_SUCCESS)
8077  {
8078  alignmentForMemType = VMA_MAX(
8079  vkMemReq.alignment,
8080  GetMemoryTypeMinAlignment(memTypeIndex));
8081 
8082  res = AllocateMemoryOfType(
8083  vkMemReq.size,
8084  alignmentForMemType,
8085  requiresDedicatedAllocation || prefersDedicatedAllocation,
8086  dedicatedBuffer,
8087  dedicatedImage,
8088  createInfo,
8089  memTypeIndex,
8090  suballocType,
8091  pAllocation);
8092  // Allocation from this alternative memory type succeeded.
8093  if(res == VK_SUCCESS)
8094  {
8095  return res;
8096  }
8097  // else: Allocation from this memory type failed. Try next one - next loop iteration.
8098  }
8099  // No other matching memory type index could be found.
8100  else
8101  {
8102  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
8103  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8104  }
8105  }
8106  }
8107  }
8108  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
8109  else
8110  return res;
8111  }
8112 }
8113 
8114 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
8115 {
8116  VMA_ASSERT(allocation);
8117 
8118  if(allocation->CanBecomeLost() == false ||
8119  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
8120  {
8121  switch(allocation->GetType())
8122  {
8123  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8124  {
8125  VmaBlockVector* pBlockVector = VMA_NULL;
8126  VmaPool hPool = allocation->GetPool();
8127  if(hPool != VK_NULL_HANDLE)
8128  {
8129  pBlockVector = &hPool->m_BlockVector;
8130  }
8131  else
8132  {
8133  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8134  pBlockVector = m_pBlockVectors[memTypeIndex];
8135  }
8136  pBlockVector->Free(allocation);
8137  }
8138  break;
8139  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8140  FreeDedicatedMemory(allocation);
8141  break;
8142  default:
8143  VMA_ASSERT(0);
8144  }
8145  }
8146 
8147  allocation->SetUserData(this, VMA_NULL);
8148  vma_delete(this, allocation);
8149 }
8150 
8151 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
8152 {
8153  // Initialize.
8154  InitStatInfo(pStats->total);
8155  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
8156  InitStatInfo(pStats->memoryType[i]);
8157  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
8158  InitStatInfo(pStats->memoryHeap[i]);
8159 
8160  // Process default pools.
8161  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8162  {
8163  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
8164  VMA_ASSERT(pBlockVector);
8165  pBlockVector->AddStats(pStats);
8166  }
8167 
8168  // Process custom pools.
8169  {
8170  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8171  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
8172  {
8173  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
8174  }
8175  }
8176 
8177  // Process dedicated allocations.
8178  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8179  {
8180  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
8181  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8182  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8183  VMA_ASSERT(pDedicatedAllocVector);
8184  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
8185  {
8186  VmaStatInfo allocationStatInfo;
8187  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
8188  VmaAddStatInfo(pStats->total, allocationStatInfo);
8189  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
8190  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
8191  }
8192  }
8193 
8194  // Postprocess.
8195  VmaPostprocessCalcStatInfo(pStats->total);
8196  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
8197  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
8198  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
8199  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
8200 }
8201 
8202 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
8203 
8204 VkResult VmaAllocator_T::Defragment(
8205  VmaAllocation* pAllocations,
8206  size_t allocationCount,
8207  VkBool32* pAllocationsChanged,
8208  const VmaDefragmentationInfo* pDefragmentationInfo,
8209  VmaDefragmentationStats* pDefragmentationStats)
8210 {
8211  if(pAllocationsChanged != VMA_NULL)
8212  {
8213  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
8214  }
8215  if(pDefragmentationStats != VMA_NULL)
8216  {
8217  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
8218  }
8219 
8220  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
8221 
8222  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
8223 
8224  const size_t poolCount = m_Pools.size();
8225 
8226  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
8227  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8228  {
8229  VmaAllocation hAlloc = pAllocations[allocIndex];
8230  VMA_ASSERT(hAlloc);
8231  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
8232  // DedicatedAlloc cannot be defragmented.
8233  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
8234  // Only HOST_VISIBLE memory types can be defragmented.
8235  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
8236  // Lost allocation cannot be defragmented.
8237  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
8238  {
8239  VmaBlockVector* pAllocBlockVector = VMA_NULL;
8240 
8241  const VmaPool hAllocPool = hAlloc->GetPool();
8242  // This allocation belongs to custom pool.
8243  if(hAllocPool != VK_NULL_HANDLE)
8244  {
8245  pAllocBlockVector = &hAllocPool->GetBlockVector();
8246  }
8247  // This allocation belongs to general pool.
8248  else
8249  {
8250  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
8251  }
8252 
8253  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
8254 
8255  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
8256  &pAllocationsChanged[allocIndex] : VMA_NULL;
8257  pDefragmentator->AddAllocation(hAlloc, pChanged);
8258  }
8259  }
8260 
8261  VkResult result = VK_SUCCESS;
8262 
8263  // ======== Main processing.
8264 
8265  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
8266  uint32_t maxAllocationsToMove = UINT32_MAX;
8267  if(pDefragmentationInfo != VMA_NULL)
8268  {
8269  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
8270  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
8271  }
8272 
8273  // Process standard memory.
8274  for(uint32_t memTypeIndex = 0;
8275  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
8276  ++memTypeIndex)
8277  {
8278  // Only HOST_VISIBLE memory types can be defragmented.
8279  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8280  {
8281  result = m_pBlockVectors[memTypeIndex]->Defragment(
8282  pDefragmentationStats,
8283  maxBytesToMove,
8284  maxAllocationsToMove);
8285  }
8286  }
8287 
8288  // Process custom pools.
8289  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
8290  {
8291  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
8292  pDefragmentationStats,
8293  maxBytesToMove,
8294  maxAllocationsToMove);
8295  }
8296 
8297  // ======== Destroy defragmentators.
8298 
8299  // Process custom pools.
8300  for(size_t poolIndex = poolCount; poolIndex--; )
8301  {
8302  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
8303  }
8304 
8305  // Process standard memory.
8306  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
8307  {
8308  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8309  {
8310  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
8311  }
8312  }
8313 
8314  return result;
8315 }
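// [Editorial example] A sketch of driving this path through the public
// vmaDefragment() entry point. After the call, buffers or images bound to
// allocations reported as changed must be rebound or recreated by the caller.
/*
#include <vector>
std::vector<VmaAllocation> allocations; // assume already filled by the application
std::vector<VkBool32> allocationsChanged(allocations.size());

VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no limit on bytes moved
defragInfo.maxAllocationsToMove = UINT32_MAX; // no limit on allocation count

VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(allocator, allocations.data(), allocations.size(),
    allocationsChanged.data(), &defragInfo, &stats);
*/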
8316 
8317 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
8318 {
8319  if(hAllocation->CanBecomeLost())
8320  {
8321  /*
8322  Warning: This is a carefully designed algorithm.
8323  Do not modify unless you really know what you're doing :)
8324  */
8325  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8326  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8327  for(;;)
8328  {
8329  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8330  {
8331  pAllocationInfo->memoryType = UINT32_MAX;
8332  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
8333  pAllocationInfo->offset = 0;
8334  pAllocationInfo->size = hAllocation->GetSize();
8335  pAllocationInfo->pMappedData = VMA_NULL;
8336  pAllocationInfo->pUserData = hAllocation->GetUserData();
8337  return;
8338  }
8339  else if(localLastUseFrameIndex == localCurrFrameIndex)
8340  {
8341  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8342  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8343  pAllocationInfo->offset = hAllocation->GetOffset();
8344  pAllocationInfo->size = hAllocation->GetSize();
8345  pAllocationInfo->pMappedData = VMA_NULL;
8346  pAllocationInfo->pUserData = hAllocation->GetUserData();
8347  return;
8348  }
8349  else // Last use time earlier than current time.
8350  {
8351  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8352  {
8353  localLastUseFrameIndex = localCurrFrameIndex;
8354  }
8355  }
8356  }
8357  }
8358  else
8359  {
8360 #if VMA_STATS_STRING_ENABLED
8361  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8362  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8363  for(;;)
8364  {
8365  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
8366  if(localLastUseFrameIndex == localCurrFrameIndex)
8367  {
8368  break;
8369  }
8370  else // Last use time earlier than current time.
8371  {
8372  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8373  {
8374  localLastUseFrameIndex = localCurrFrameIndex;
8375  }
8376  }
8377  }
8378 #endif
8379 
8380  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8381  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8382  pAllocationInfo->offset = hAllocation->GetOffset();
8383  pAllocationInfo->size = hAllocation->GetSize();
8384  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
8385  pAllocationInfo->pUserData = hAllocation->GetUserData();
8386  }
8387 }
8388 
8389 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
8390 {
8391  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
8392  if(hAllocation->CanBecomeLost())
8393  {
8394  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8395  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8396  for(;;)
8397  {
8398  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8399  {
8400  return false;
8401  }
8402  else if(localLastUseFrameIndex == localCurrFrameIndex)
8403  {
8404  return true;
8405  }
8406  else // Last use time earlier than current time.
8407  {
8408  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8409  {
8410  localLastUseFrameIndex = localCurrFrameIndex;
8411  }
8412  }
8413  }
8414  }
8415  else
8416  {
8417 #if VMA_STATS_STRING_ENABLED
8418  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8419  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8420  for(;;)
8421  {
8422  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
8423  if(localLastUseFrameIndex == localCurrFrameIndex)
8424  {
8425  break;
8426  }
8427  else // Last use time earlier than current time.
8428  {
8429  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8430  {
8431  localLastUseFrameIndex = localCurrFrameIndex;
8432  }
8433  }
8434  }
8435 #endif
8436 
8437  return true;
8438  }
8439 }
8440 
8441 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8442 {
8443  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8444 
8445  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8446 
8447  if(newCreateInfo.maxBlockCount == 0)
8448  {
8449  newCreateInfo.maxBlockCount = SIZE_MAX;
8450  }
8451  if(newCreateInfo.blockSize == 0)
8452  {
8453  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8454  }
8455 
8456  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8457 
8458  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8459  if(res != VK_SUCCESS)
8460  {
8461  vma_delete(this, *pPool);
8462  *pPool = VMA_NULL;
8463  return res;
8464  }
8465 
8466  // Add to m_Pools.
8467  {
8468  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8469  (*pPool)->SetId(m_NextPoolId++);
8470  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8471  }
8472 
8473  return VK_SUCCESS;
8474 }
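// [Editorial example] Creating a custom pool via the public API; the memory type
// index is chosen first with vmaFindMemoryTypeIndex(). All sample values here are
// illustrative placeholders.
/*
VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &sampleAllocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 0;     // 0 = use CalcPreferredBlockSize() default, as handled above
poolCreateInfo.maxBlockCount = 0; // 0 = unlimited (SIZE_MAX), as handled above

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/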
8475 
8476 void VmaAllocator_T::DestroyPool(VmaPool pool)
8477 {
8478  // Remove from m_Pools.
8479  {
8480  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8481  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8482  VMA_ASSERT(success && "Pool not found in Allocator.");
8483  }
8484 
8485  vma_delete(this, pool);
8486 }
8487 
8488 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8489 {
8490  pool->m_BlockVector.GetPoolStats(pPoolStats);
8491 }
8492 
8493 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8494 {
8495  m_CurrentFrameIndex.store(frameIndex);
8496 }
8497 
8498 void VmaAllocator_T::MakePoolAllocationsLost(
8499  VmaPool hPool,
8500  size_t* pLostAllocationCount)
8501 {
8502  hPool->m_BlockVector.MakePoolAllocationsLost(
8503  m_CurrentFrameIndex.load(),
8504  pLostAllocationCount);
8505 }
8506 
8507 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8508 {
8509  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8510  (*pAllocation)->InitLost();
8511 }
8512 
8513 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8514 {
8515  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8516 
8517  VkResult res;
8518  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8519  {
8520  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8521  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8522  {
8523  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8524  if(res == VK_SUCCESS)
8525  {
8526  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8527  }
8528  }
8529  else
8530  {
8531  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8532  }
8533  }
8534  else
8535  {
8536  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8537  }
8538 
8539  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8540  {
8541  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8542  }
8543 
8544  return res;
8545 }
8546 
8547 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8548 {
8549  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8550  {
8551  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8552  }
8553 
8554  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8555 
8556  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8557  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8558  {
8559  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8560  m_HeapSizeLimit[heapIndex] += size;
8561  }
8562 }
8563 
8564 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8565 {
8566  if(hAllocation->CanBecomeLost())
8567  {
8568  return VK_ERROR_MEMORY_MAP_FAILED;
8569  }
8570 
8571  switch(hAllocation->GetType())
8572  {
8573  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8574  {
8575  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8576  char *pBytes = VMA_NULL;
8577  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8578  if(res == VK_SUCCESS)
8579  {
8580  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8581  hAllocation->BlockAllocMap();
8582  }
8583  return res;
8584  }
8585  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8586  return hAllocation->DedicatedAllocMap(this, ppData);
8587  default:
8588  VMA_ASSERT(0);
8589  return VK_ERROR_MEMORY_MAP_FAILED;
8590  }
8591 }
8592 
8593 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8594 {
8595  switch(hAllocation->GetType())
8596  {
8597  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8598  {
8599  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8600  hAllocation->BlockAllocUnmap();
8601  pBlock->Unmap(this, 1);
8602  }
8603  break;
8604  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8605  hAllocation->DedicatedAllocUnmap(this);
8606  break;
8607  default:
8608  VMA_ASSERT(0);
8609  }
8610 }
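// [Editorial example] Typical usage through the public wrappers. Map calls on block
// allocations are reference-counted, so each vmaMapMemory() must be paired with
// exactly one vmaUnmapMemory(). `srcData`/`srcDataSize` are application-owned.
/*
#include <cstring>
void* pData = VMA_NULL;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, srcDataSize);
    vmaUnmapMemory(allocator, allocation);
}
*/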
8611 
8612 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
8613 {
8614  VkResult res = VK_SUCCESS;
8615  switch(hAllocation->GetType())
8616  {
8617  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8618  res = GetVulkanFunctions().vkBindBufferMemory(
8619  m_hDevice,
8620  hBuffer,
8621  hAllocation->GetMemory(),
8622  0); //memoryOffset
8623  break;
8624  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8625  {
8626  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8627  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
8628  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
8629  break;
8630  }
8631  default:
8632  VMA_ASSERT(0);
8633  }
8634  return res;
8635 }
8636 
8637 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
8638 {
8639  VkResult res = VK_SUCCESS;
8640  switch(hAllocation->GetType())
8641  {
8642  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8643  res = GetVulkanFunctions().vkBindImageMemory(
8644  m_hDevice,
8645  hImage,
8646  hAllocation->GetMemory(),
8647  0); //memoryOffset
8648  break;
8649  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8650  {
8651  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8652  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
8653  res = pBlock->BindImageMemory(this, hAllocation, hImage);
8654  break;
8655  }
8656  default:
8657  VMA_ASSERT(0);
8658  }
8659  return res;
8660 }
8661 
8662 void VmaAllocator_T::FlushOrInvalidateAllocation(
8663  VmaAllocation hAllocation,
8664  VkDeviceSize offset, VkDeviceSize size,
8665  VMA_CACHE_OPERATION op)
8666 {
8667  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
8668  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
8669  {
8670  const VkDeviceSize allocationSize = hAllocation->GetSize();
8671  VMA_ASSERT(offset <= allocationSize);
8672 
8673  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
8674 
8675  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
8676  memRange.memory = hAllocation->GetMemory();
8677 
8678  switch(hAllocation->GetType())
8679  {
8680  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8681  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
8682  if(size == VK_WHOLE_SIZE)
8683  {
8684  memRange.size = allocationSize - memRange.offset;
8685  }
8686  else
8687  {
8688  VMA_ASSERT(offset + size <= allocationSize);
8689  memRange.size = VMA_MIN(
8690  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
8691  allocationSize - memRange.offset);
8692  }
8693  break;
8694 
8695  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8696  {
8697  // 1. Still within this allocation.
8698  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
8699  if(size == VK_WHOLE_SIZE)
8700  {
8701  size = allocationSize - offset;
8702  }
8703  else
8704  {
8705  VMA_ASSERT(offset + size <= allocationSize);
8706  }
8707  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
8708 
8709  // 2. Adjust to whole block.
8710  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
8711  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
8712  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_Metadata.GetSize();
8713  memRange.offset += allocationOffset;
8714  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
8715 
8716  break;
8717  }
8718 
8719  default:
8720  VMA_ASSERT(0);
8721  }
8722 
8723  switch(op)
8724  {
8725  case VMA_CACHE_FLUSH:
8726  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
8727  break;
8728  case VMA_CACHE_INVALIDATE:
8729  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
8730  break;
8731  default:
8732  VMA_ASSERT(0);
8733  }
8734  }
8735  // else: Just ignore this call.
8736 }
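// [Editorial example] A sketch of the corresponding public usage for memory that is
// HOST_VISIBLE but not HOST_COHERENT: write through a mapped pointer, then flush.
// Assumes this header's public vmaFlushAllocation() wrapper, which funnels into
// FlushOrInvalidateAllocation() with VMA_CACHE_FLUSH.
/*
void* pData = VMA_NULL;
vmaMapMemory(allocator, allocation, &pData);
memcpy(pData, srcData, srcDataSize);
vmaUnmapMemory(allocator, allocation);
// Offset/size are relative to the allocation and get aligned to nonCoherentAtomSize above.
vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/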
8737 
8738 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8739 {
8740  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8741 
8742  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8743  {
8744  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8745  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8746  VMA_ASSERT(pDedicatedAllocations);
8747  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8748  VMA_ASSERT(success);
8749  }
8750 
8751  VkDeviceMemory hMemory = allocation->GetMemory();
8752 
8753  if(allocation->GetMappedData() != VMA_NULL)
8754  {
8755  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8756  }
8757 
8758  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8759 
8760  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8761 }
8762 
8763 #if VMA_STATS_STRING_ENABLED
8764 
8765 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8766 {
8767  bool dedicatedAllocationsStarted = false;
8768  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8769  {
8770  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8771  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8772  VMA_ASSERT(pDedicatedAllocVector);
8773  if(pDedicatedAllocVector->empty() == false)
8774  {
8775  if(dedicatedAllocationsStarted == false)
8776  {
8777  dedicatedAllocationsStarted = true;
8778  json.WriteString("DedicatedAllocations");
8779  json.BeginObject();
8780  }
8781 
8782  json.BeginString("Type ");
8783  json.ContinueString(memTypeIndex);
8784  json.EndString();
8785 
8786  json.BeginArray();
8787 
8788  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8789  {
8790  json.BeginObject(true);
8791  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8792  hAlloc->PrintParameters(json);
8793  json.EndObject();
8794  }
8795 
8796  json.EndArray();
8797  }
8798  }
8799  if(dedicatedAllocationsStarted)
8800  {
8801  json.EndObject();
8802  }
8803 
8804  {
8805  bool allocationsStarted = false;
8806  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8807  {
8808  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8809  {
8810  if(allocationsStarted == false)
8811  {
8812  allocationsStarted = true;
8813  json.WriteString("DefaultPools");
8814  json.BeginObject();
8815  }
8816 
8817  json.BeginString("Type ");
8818  json.ContinueString(memTypeIndex);
8819  json.EndString();
8820 
8821  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8822  }
8823  }
8824  if(allocationsStarted)
8825  {
8826  json.EndObject();
8827  }
8828  }
8829 
8830  {
8831  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8832  const size_t poolCount = m_Pools.size();
8833  if(poolCount > 0)
8834  {
8835  json.WriteString("Pools");
8836  json.BeginObject();
8837  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8838  {
8839  json.BeginString();
8840  json.ContinueString(m_Pools[poolIndex]->GetId());
8841  json.EndString();
8842 
8843  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8844  }
8845  json.EndObject();
8846  }
8847  }
8848 }
8849 
8850 #endif // #if VMA_STATS_STRING_ENABLED
8851 
8852 static VkResult AllocateMemoryForImage(
8853  VmaAllocator allocator,
8854  VkImage image,
8855  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8856  VmaSuballocationType suballocType,
8857  VmaAllocation* pAllocation)
8858 {
8859  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8860 
8861  VkMemoryRequirements vkMemReq = {};
8862  bool requiresDedicatedAllocation = false;
8863  bool prefersDedicatedAllocation = false;
8864  allocator->GetImageMemoryRequirements(image, vkMemReq,
8865  requiresDedicatedAllocation, prefersDedicatedAllocation);
8866 
8867  return allocator->AllocateMemory(
8868  vkMemReq,
8869  requiresDedicatedAllocation,
8870  prefersDedicatedAllocation,
8871  VK_NULL_HANDLE, // dedicatedBuffer
8872  image, // dedicatedImage
8873  *pAllocationCreateInfo,
8874  suballocType,
8875  pAllocation);
8876 }
8877 
8878 ////////////////////////////////////////////////////////////////////////////////
8879 // Public interface
8880 
8881 VkResult vmaCreateAllocator(
8882  const VmaAllocatorCreateInfo* pCreateInfo,
8883  VmaAllocator* pAllocator)
8884 {
8885  VMA_ASSERT(pCreateInfo && pAllocator);
8886  VMA_DEBUG_LOG("vmaCreateAllocator");
8887  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8888  return VK_SUCCESS;
8889 }
8890 
8891 void vmaDestroyAllocator(
8892  VmaAllocator allocator)
8893 {
8894  if(allocator != VK_NULL_HANDLE)
8895  {
8896  VMA_DEBUG_LOG("vmaDestroyAllocator");
8897  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8898  vma_delete(&allocationCallbacks, allocator);
8899  }
8900 }
8901 
8902 void vmaGetPhysicalDeviceProperties(
8903  VmaAllocator allocator,
8904  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8905 {
8906  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8907  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8908 }
8909 
8910 void vmaGetPhysicalDeviceMemoryProperties(
8911  VmaAllocator allocator,
8912  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8913 {
8914  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8915  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8916 }
8917 
8918 void vmaGetMemoryTypeProperties(
8919  VmaAllocator allocator,
8920  uint32_t memoryTypeIndex,
8921  VkMemoryPropertyFlags* pFlags)
8922 {
8923  VMA_ASSERT(allocator && pFlags);
8924  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8925  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8926 }
8927 
8928 void vmaSetCurrentFrameIndex(
8929  VmaAllocator allocator,
8930  uint32_t frameIndex)
8931 {
8932  VMA_ASSERT(allocator);
8933  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8934 
8935  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8936 
8937  allocator->SetCurrentFrameIndex(frameIndex);
8938 }
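// [Editorial example] Typical per-frame usage when lost allocations are in play;
// `frameCounter` is an application-side counter that must never reach
// VMA_FRAME_INDEX_LOST (see the assert above).
/*
++frameCounter;
vmaSetCurrentFrameIndex(allocator, frameCounter);
*/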
8939 
8940 void vmaCalculateStats(
8941  VmaAllocator allocator,
8942  VmaStats* pStats)
8943 {
8944  VMA_ASSERT(allocator && pStats);
8945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8946  allocator->CalculateStats(pStats);
8947 }
8948 
8949 #if VMA_STATS_STRING_ENABLED
8950 
8951 void vmaBuildStatsString(
8952  VmaAllocator allocator,
8953  char** ppStatsString,
8954  VkBool32 detailedMap)
8955 {
8956  VMA_ASSERT(allocator && ppStatsString);
8957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8958 
8959  VmaStringBuilder sb(allocator);
8960  {
8961  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8962  json.BeginObject();
8963 
8964  VmaStats stats;
8965  allocator->CalculateStats(&stats);
8966 
8967  json.WriteString("Total");
8968  VmaPrintStatInfo(json, stats.total);
8969 
8970  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8971  {
8972  json.BeginString("Heap ");
8973  json.ContinueString(heapIndex);
8974  json.EndString();
8975  json.BeginObject();
8976 
8977  json.WriteString("Size");
8978  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8979 
8980  json.WriteString("Flags");
8981  json.BeginArray(true);
8982  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8983  {
8984  json.WriteString("DEVICE_LOCAL");
8985  }
8986  json.EndArray();
8987 
8988  if(stats.memoryHeap[heapIndex].blockCount > 0)
8989  {
8990  json.WriteString("Stats");
8991  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8992  }
8993 
8994  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8995  {
8996  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8997  {
8998  json.BeginString("Type ");
8999  json.ContinueString(typeIndex);
9000  json.EndString();
9001 
9002  json.BeginObject();
9003 
9004  json.WriteString("Flags");
9005  json.BeginArray(true);
9006  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
9007  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
9008  {
9009  json.WriteString("DEVICE_LOCAL");
9010  }
9011  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
9012  {
9013  json.WriteString("HOST_VISIBLE");
9014  }
9015  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
9016  {
9017  json.WriteString("HOST_COHERENT");
9018  }
9019  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
9020  {
9021  json.WriteString("HOST_CACHED");
9022  }
9023  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
9024  {
9025  json.WriteString("LAZILY_ALLOCATED");
9026  }
9027  json.EndArray();
9028 
9029  if(stats.memoryType[typeIndex].blockCount > 0)
9030  {
9031  json.WriteString("Stats");
9032  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
9033  }
9034 
9035  json.EndObject();
9036  }
9037  }
9038 
9039  json.EndObject();
9040  }
9041  if(detailedMap == VK_TRUE)
9042  {
9043  allocator->PrintDetailedMap(json);
9044  }
9045 
9046  json.EndObject();
9047  }
9048 
9049  const size_t len = sb.GetLength();
9050  char* const pChars = vma_new_array(allocator, char, len + 1);
9051  if(len > 0)
9052  {
9053  memcpy(pChars, sb.GetData(), len);
9054  }
9055  pChars[len] = '\0';
9056  *ppStatsString = pChars;
9057 }
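// [Editorial example] Dumping the JSON statistics built above and releasing the
// string with the matching free function:
/*
#include <cstdio>
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/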
9058 
9059 void vmaFreeStatsString(
9060  VmaAllocator allocator,
9061  char* pStatsString)
9062 {
9063  if(pStatsString != VMA_NULL)
9064  {
9065  VMA_ASSERT(allocator);
9066  size_t len = strlen(pStatsString);
9067  vma_delete_array(allocator, pStatsString, len + 1);
9068  }
9069 }
9070 
9071 #endif // #if VMA_STATS_STRING_ENABLED
9072 
9073 /*
9074 This function is not protected by any mutex because it just reads immutable data.
9075 */
9076 VkResult vmaFindMemoryTypeIndex(
9077  VmaAllocator allocator,
9078  uint32_t memoryTypeBits,
9079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9080  uint32_t* pMemoryTypeIndex)
9081 {
9082  VMA_ASSERT(allocator != VK_NULL_HANDLE);
9083  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
9084  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
9085 
9086  if(pAllocationCreateInfo->memoryTypeBits != 0)
9087  {
9088  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
9089  }
9090 
9091  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
9092  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
9093 
9094  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
9095  if(mapped)
9096  {
9097  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
9098  }
9099 
9100  // Convert usage to requiredFlags and preferredFlags.
9101  switch(pAllocationCreateInfo->usage)
9102  {
9103  case VMA_MEMORY_USAGE_UNKNOWN:
9104  break;
9105  case VMA_MEMORY_USAGE_GPU_ONLY:
9106  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
9107  {
9108  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
9109  }
9110  break;
9111  case VMA_MEMORY_USAGE_CPU_ONLY:
9112  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
9113  break;
9114  case VMA_MEMORY_USAGE_CPU_TO_GPU:
9115  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
9116  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
9117  {
9118  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
9119  }
9120  break;
9121  case VMA_MEMORY_USAGE_GPU_TO_CPU:
9122  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
9123  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
9124  break;
9125  default:
9126  break;
9127  }
9128 
9129  *pMemoryTypeIndex = UINT32_MAX;
9130  uint32_t minCost = UINT32_MAX;
9131  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
9132  memTypeIndex < allocator->GetMemoryTypeCount();
9133  ++memTypeIndex, memTypeBit <<= 1)
9134  {
9135  // This memory type is acceptable according to memoryTypeBits bitmask.
9136  if((memTypeBit & memoryTypeBits) != 0)
9137  {
9138  const VkMemoryPropertyFlags currFlags =
9139  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
9140  // This memory type contains requiredFlags.
9141  if((requiredFlags & ~currFlags) == 0)
9142  {
9143  // Calculate cost as number of bits from preferredFlags not present in this memory type.
9144  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
9145  // Remember memory type with lowest cost.
9146  if(currCost < minCost)
9147  {
9148  *pMemoryTypeIndex = memTypeIndex;
9149  if(currCost == 0)
9150  {
9151  return VK_SUCCESS;
9152  }
9153  minCost = currCost;
9154  }
9155  }
9156  }
9157  }
9158  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
9159 }
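// [Editorial example] Finding a memory type for a staging buffer: required flags
// come from the usage enum (per the switch above), while preferred flags only
// lower the cost of better candidates.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
// VK_SUCCESS => memTypeIndex is the lowest-cost type containing all required flags.
*/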
9160 
9161 VkResult vmaFindMemoryTypeIndexForBufferInfo(
9162  VmaAllocator allocator,
9163  const VkBufferCreateInfo* pBufferCreateInfo,
9164  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9165  uint32_t* pMemoryTypeIndex)
9166 {
9167  VMA_ASSERT(allocator != VK_NULL_HANDLE);
9168  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
9169  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
9170  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
9171 
9172  const VkDevice hDev = allocator->m_hDevice;
9173  VkBuffer hBuffer = VK_NULL_HANDLE;
9174  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
9175  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
9176  if(res == VK_SUCCESS)
9177  {
9178  VkMemoryRequirements memReq = {};
9179  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
9180  hDev, hBuffer, &memReq);
9181 
9182  res = vmaFindMemoryTypeIndex(
9183  allocator,
9184  memReq.memoryTypeBits,
9185  pAllocationCreateInfo,
9186  pMemoryTypeIndex);
9187 
9188  allocator->GetVulkanFunctions().vkDestroyBuffer(
9189  hDev, hBuffer, allocator->GetAllocationCallbacks());
9190  }
9191  return res;
9192 }
9193 
9194 VkResult vmaFindMemoryTypeIndexForImageInfo(
9195  VmaAllocator allocator,
9196  const VkImageCreateInfo* pImageCreateInfo,
9197  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9198  uint32_t* pMemoryTypeIndex)
9199 {
9200  VMA_ASSERT(allocator != VK_NULL_HANDLE);
9201  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
9202  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
9203  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
9204 
9205  const VkDevice hDev = allocator->m_hDevice;
9206  VkImage hImage = VK_NULL_HANDLE;
9207  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
9208  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
9209  if(res == VK_SUCCESS)
9210  {
9211  VkMemoryRequirements memReq = {};
9212  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
9213  hDev, hImage, &memReq);
9214 
9215  res = vmaFindMemoryTypeIndex(
9216  allocator,
9217  memReq.memoryTypeBits,
9218  pAllocationCreateInfo,
9219  pMemoryTypeIndex);
9220 
9221  allocator->GetVulkanFunctions().vkDestroyImage(
9222  hDev, hImage, allocator->GetAllocationCallbacks());
9223  }
9224  return res;
9225 }
9226 
9227 VkResult vmaCreatePool(
9228  VmaAllocator allocator,
9229  const VmaPoolCreateInfo* pCreateInfo,
9230  VmaPool* pPool)
9231 {
9232  VMA_ASSERT(allocator && pCreateInfo && pPool);
9233 
9234  VMA_DEBUG_LOG("vmaCreatePool");
9235 
9236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9237 
9238  return allocator->CreatePool(pCreateInfo, pPool);
9239 }
9240 
9241 void vmaDestroyPool(
9242  VmaAllocator allocator,
9243  VmaPool pool)
9244 {
9245  VMA_ASSERT(allocator);
9246 
9247  if(pool == VK_NULL_HANDLE)
9248  {
9249  return;
9250  }
9251 
9252  VMA_DEBUG_LOG("vmaDestroyPool");
9253 
9254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9255 
9256  allocator->DestroyPool(pool);
9257 }
9258 
9259 void vmaGetPoolStats(
9260  VmaAllocator allocator,
9261  VmaPool pool,
9262  VmaPoolStats* pPoolStats)
9263 {
9264  VMA_ASSERT(allocator && pool && pPoolStats);
9265 
9266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9267 
9268  allocator->GetPoolStats(pool, pPoolStats);
9269 }
9270 
9271 void vmaMakePoolAllocationsLost(
9272  VmaAllocator allocator,
9273  VmaPool pool,
9274  size_t* pLostAllocationCount)
9275 {
9276  VMA_ASSERT(allocator && pool);
9277 
9278  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9279 
9280  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
9281 }
9282 
9283 VkResult vmaAllocateMemory(
9284  VmaAllocator allocator,
9285  const VkMemoryRequirements* pVkMemoryRequirements,
9286  const VmaAllocationCreateInfo* pCreateInfo,
9287  VmaAllocation* pAllocation,
9288  VmaAllocationInfo* pAllocationInfo)
9289 {
9290  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
9291 
9292  VMA_DEBUG_LOG("vmaAllocateMemory");
9293 
9294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9295 
9296  VkResult result = allocator->AllocateMemory(
9297  *pVkMemoryRequirements,
9298  false, // requiresDedicatedAllocation
9299  false, // prefersDedicatedAllocation
9300  VK_NULL_HANDLE, // dedicatedBuffer
9301  VK_NULL_HANDLE, // dedicatedImage
9302  *pCreateInfo,
9303  VMA_SUBALLOCATION_TYPE_UNKNOWN,
9304  pAllocation);
9305 
9306  if(pAllocationInfo && result == VK_SUCCESS)
9307  {
9308  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9309  }
9310 
9311  return result;
9312 }
9313 
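As the body above shows, this generic entry point passes `false` for both dedicated-allocation hints and uses VMA_SUBALLOCATION_TYPE_UNKNOWN, so it cannot benefit from VK_KHR_dedicated_allocation; prefer the ForBuffer/ForImage variants when a resource handle is available. A sketch assuming a valid `allocator`; the VkMemoryRequirements values are illustrative and would normally come from vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements:

    VkMemoryRequirements memReq = {};
    memReq.size = 1024 * 1024;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX; // any memory type acceptable

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaAllocateMemory(
        allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
    if(res == VK_SUCCESS)
    {
        // allocInfo.deviceMemory + allocInfo.offset identify the backing range.
        vmaFreeMemory(allocator, allocation);
    }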
9314 VkResult vmaAllocateMemoryForBuffer(
9315  VmaAllocator allocator,
9316  VkBuffer buffer,
9317  const VmaAllocationCreateInfo* pCreateInfo,
9318  VmaAllocation* pAllocation,
9319  VmaAllocationInfo* pAllocationInfo)
9320 {
9321  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9322 
9323  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
9324 
9325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9326 
9327  VkMemoryRequirements vkMemReq = {};
9328  bool requiresDedicatedAllocation = false;
9329  bool prefersDedicatedAllocation = false;
9330  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
9331  requiresDedicatedAllocation,
9332  prefersDedicatedAllocation);
9333 
9334  VkResult result = allocator->AllocateMemory(
9335  vkMemReq,
9336  requiresDedicatedAllocation,
9337  prefersDedicatedAllocation,
9338  buffer, // dedicatedBuffer
9339  VK_NULL_HANDLE, // dedicatedImage
9340  *pCreateInfo,
9341  VMA_SUBALLOCATION_TYPE_BUFFER,
9342  pAllocation);
9343 
9344  if(pAllocationInfo && result == VK_SUCCESS)
9345  {
9346  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9347  }
9348 
9349  return result;
9350 }
9351 
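Unlike vmaCreateBuffer further below, this function only allocates; it does not bind the memory to the buffer. A sketch, assuming a valid `allocator` and a `buffer` already created with vkCreateBuffer:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaAllocateMemoryForBuffer(
        allocator, buffer, &allocCreateInfo, &allocation, nullptr);
    if(res == VK_SUCCESS)
    {
        // Binding is a separate step - see vmaBindBufferMemory below.
        res = vmaBindBufferMemory(allocator, allocation, buffer);
    }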
9352 VkResult vmaAllocateMemoryForImage(
9353  VmaAllocator allocator,
9354  VkImage image,
9355  const VmaAllocationCreateInfo* pCreateInfo,
9356  VmaAllocation* pAllocation,
9357  VmaAllocationInfo* pAllocationInfo)
9358 {
9359  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9360 
9361  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
9362 
9363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9364 
9365  VkResult result = AllocateMemoryForImage(
9366  allocator,
9367  image,
9368  pCreateInfo,
9369  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
9370  pAllocation);
9371 
9372  if(pAllocationInfo && result == VK_SUCCESS)
9373  {
9374  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9375  }
9376 
9377  return result;
9378 }
9379 
9380 void vmaFreeMemory(
9381  VmaAllocator allocator,
9382  VmaAllocation allocation)
9383 {
9384  VMA_ASSERT(allocator);
9385  VMA_DEBUG_LOG("vmaFreeMemory");
9386  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9387  if(allocation != VK_NULL_HANDLE)
9388  {
9389  allocator->FreeMemory(allocation);
9390  }
9391 }
9392 
9393 void vmaGetAllocationInfo(
9394  VmaAllocator allocator,
9395  VmaAllocation allocation,
9396  VmaAllocationInfo* pAllocationInfo)
9397 {
9398  VMA_ASSERT(allocator && allocation && pAllocationInfo);
9399 
9400  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9401 
9402  allocator->GetAllocationInfo(allocation, pAllocationInfo);
9403 }
9404 
9405 VkBool32 vmaTouchAllocation(
9406  VmaAllocator allocator,
9407  VmaAllocation allocation)
9408 {
9409  VMA_ASSERT(allocator && allocation);
9410 
9411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9412 
9413  return allocator->TouchAllocation(allocation);
9414 }
9415 
9416 void vmaSetAllocationUserData(
9417  VmaAllocator allocator,
9418  VmaAllocation allocation,
9419  void* pUserData)
9420 {
9421  VMA_ASSERT(allocator && allocation);
9422 
9423  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9424 
9425  allocation->SetUserData(allocator, pUserData);
9426 }
9427 
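The setter above stores the raw pointer inside the allocation object; the library never dereferences or frees it. A sketch with a hypothetical user-side metadata type (MyBufferMetadata is not part of the library):

    struct MyBufferMetadata { const char* debugName; };

    MyBufferMetadata* meta = new MyBufferMetadata{ "terrain vertex buffer" };
    vmaSetAllocationUserData(allocator, allocation, meta);

    VmaAllocationInfo allocInfo = {};
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    // static_cast<MyBufferMetadata*>(allocInfo.pUserData) == meta; the caller
    // still owns the object and must delete it itself.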
9428 void vmaCreateLostAllocation(
9429  VmaAllocator allocator,
9430  VmaAllocation* pAllocation)
9431 {
9432  VMA_ASSERT(allocator && pAllocation);
9433 
9434  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9435 
9436  allocator->CreateLostAllocation(pAllocation);
9437 }
9438 
9439 VkResult vmaMapMemory(
9440  VmaAllocator allocator,
9441  VmaAllocation allocation,
9442  void** ppData)
9443 {
9444  VMA_ASSERT(allocator && allocation && ppData);
9445 
9446  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9447 
9448  return allocator->Map(allocation, ppData);
9449 }
9450 
9451 void vmaUnmapMemory(
9452  VmaAllocator allocator,
9453  VmaAllocation allocation)
9454 {
9455  VMA_ASSERT(allocator && allocation);
9456 
9457  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9458 
9459  allocator->Unmap(allocation);
9460 }
9461 
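A typical pairing of the two functions above for a CPU-to-GPU upload; a sketch assuming `allocation` lives in a HOST_VISIBLE memory type, with `srcData`/`srcSize` standing in for the caller's staging data:

    #include <cstring> // memcpy

    void UploadToAllocation(VmaAllocator allocator, VmaAllocation allocation,
        const void* srcData, size_t srcSize)
    {
        void* pData = nullptr;
        if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
        {
            memcpy(pData, srcData, srcSize);       // write through the mapped pointer
            vmaUnmapMemory(allocator, allocation); // pair every successful Map
        }
    }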
9462 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
9463 {
9464  VMA_ASSERT(allocator && allocation);
9465 
9466  VMA_DEBUG_LOG("vmaFlushAllocation");
9467 
9468  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9469 
9470  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
9471 }
9472 
9473 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
9474 {
9475  VMA_ASSERT(allocator && allocation);
9476 
9477  VMA_DEBUG_LOG("vmaInvalidateAllocation");
9478 
9479  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9480 
9481  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
9482 }
9483 
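Both functions above forward to FlushOrInvalidateAllocation, which issues vkFlushMappedMemoryRanges or vkInvalidateMappedMemoryRanges against the allocation's own memory handle and offset. A fragment showing when each call is required; `writtenSize` and `readSize` are assumed placeholders for the byte ranges actually touched by the CPU:

    // Only required when the allocation's memory type lacks
    // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT.
    vmaFlushAllocation(allocator, allocation, 0, writtenSize);   // after CPU writes
    // ... submit GPU work that consumes the data and writes results back ...
    vmaInvalidateAllocation(allocator, allocation, 0, readSize); // before CPU reads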
9484 VkResult vmaDefragment(
9485  VmaAllocator allocator,
9486  VmaAllocation* pAllocations,
9487  size_t allocationCount,
9488  VkBool32* pAllocationsChanged,
9489  const VmaDefragmentationInfo *pDefragmentationInfo,
9490  VmaDefragmentationStats* pDefragmentationStats)
9491 {
9492  VMA_ASSERT(allocator && pAllocations);
9493 
9494  VMA_DEBUG_LOG("vmaDefragment");
9495 
9496  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9497 
9498  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
9499 }
9500 
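A hedged sketch of driving the defragmentation call above, assuming `allocations` is an array of `allocCount` VmaAllocation handles eligible for this CPU-side defragmentation path (the function's documentation earlier in this file lists the restrictions on which allocations may be moved):

    #include <vector>

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // documented to mean "no limit"
    defragInfo.maxAllocationsToMove = UINT32_MAX; // no limit on moved allocations

    std::vector<VkBool32> changed(allocCount);
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(allocator, allocations, allocCount,
        changed.data(), &defragInfo, &defragStats);
    if(res == VK_SUCCESS)
    {
        // Where changed[i] == VK_TRUE the allocation was moved: destroy,
        // recreate, and rebind any buffer or image that was bound to it.
    }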
9501 VkResult vmaBindBufferMemory(
9502  VmaAllocator allocator,
9503  VmaAllocation allocation,
9504  VkBuffer buffer)
9505 {
9506  VMA_ASSERT(allocator && allocation && buffer);
9507 
9508  VMA_DEBUG_LOG("vmaBindBufferMemory");
9509 
9510  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9511 
9512  return allocator->BindBufferMemory(allocation, buffer);
9513 }
9514 
9515 VkResult vmaBindImageMemory(
9516  VmaAllocator allocator,
9517  VmaAllocation allocation,
9518  VkImage image)
9519 {
9520  VMA_ASSERT(allocator && allocation && image);
9521 
9522  VMA_DEBUG_LOG("vmaBindImageMemory");
9523 
9524  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9525 
9526  return allocator->BindImageMemory(allocation, image);
9527 }
9528 
9529 VkResult vmaCreateBuffer(
9530  VmaAllocator allocator,
9531  const VkBufferCreateInfo* pBufferCreateInfo,
9532  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9533  VkBuffer* pBuffer,
9534  VmaAllocation* pAllocation,
9535  VmaAllocationInfo* pAllocationInfo)
9536 {
9537  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
9538 
9539  VMA_DEBUG_LOG("vmaCreateBuffer");
9540 
9541  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9542 
9543  *pBuffer = VK_NULL_HANDLE;
9544  *pAllocation = VK_NULL_HANDLE;
9545 
9546  // 1. Create VkBuffer.
9547  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
9548  allocator->m_hDevice,
9549  pBufferCreateInfo,
9550  allocator->GetAllocationCallbacks(),
9551  pBuffer);
9552  if(res >= 0)
9553  {
9554  // 2. vkGetBufferMemoryRequirements.
9555  VkMemoryRequirements vkMemReq = {};
9556  bool requiresDedicatedAllocation = false;
9557  bool prefersDedicatedAllocation = false;
9558  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
9559  requiresDedicatedAllocation, prefersDedicatedAllocation);
9560 
9561  // Make sure alignment requirements for specific buffer usages reported
9562  // in Physical Device Properties are included in alignment reported by memory requirements.
9563  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
9564  {
9565  VMA_ASSERT(vkMemReq.alignment %
9566  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
9567  }
9568  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
9569  {
9570  VMA_ASSERT(vkMemReq.alignment %
9571  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
9572  }
9573  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
9574  {
9575  VMA_ASSERT(vkMemReq.alignment %
9576  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
9577  }
9578 
9579  // 3. Allocate memory using allocator.
9580  res = allocator->AllocateMemory(
9581  vkMemReq,
9582  requiresDedicatedAllocation,
9583  prefersDedicatedAllocation,
9584  *pBuffer, // dedicatedBuffer
9585  VK_NULL_HANDLE, // dedicatedImage
9586  *pAllocationCreateInfo,
9587  VMA_SUBALLOCATION_TYPE_BUFFER,
9588  pAllocation);
9589  if(res >= 0)
9590  {
9591  // 4. Bind buffer with memory.
9592  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
9593  if(res >= 0)
9594  {
9595  // All steps succeeded.
9596  #if VMA_STATS_STRING_ENABLED
9597  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
9598  #endif
9599  if(pAllocationInfo != VMA_NULL)
9600  {
9601  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9602  }
9603  return VK_SUCCESS;
9604  }
9605  allocator->FreeMemory(*pAllocation);
9606  *pAllocation = VK_NULL_HANDLE;
9607  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9608  *pBuffer = VK_NULL_HANDLE;
9609  return res;
9610  }
9611  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9612  *pBuffer = VK_NULL_HANDLE;
9613  return res;
9614  }
9615  return res;
9616 }
9617 
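vmaCreateBuffer above chains four steps - vkCreateBuffer, vkGetBufferMemoryRequirements (plus the alignment sanity asserts), AllocateMemory, and BindBufferMemory - and on failure tears down whatever already succeeded, in reverse order. The matching one-call usage, as a sketch with illustrative values and an assumed valid `allocator`:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr); // pAllocationInfo is optional
    if(res == VK_SUCCESS)
    {
        // ... record commands that use `buffer` ...
        vmaDestroyBuffer(allocator, buffer, allocation); // buffer + memory in one call
    }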
9618 void vmaDestroyBuffer(
9619  VmaAllocator allocator,
9620  VkBuffer buffer,
9621  VmaAllocation allocation)
9622 {
9623  VMA_ASSERT(allocator);
9624  VMA_DEBUG_LOG("vmaDestroyBuffer");
9625  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9626  if(buffer != VK_NULL_HANDLE)
9627  {
9628  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
9629  }
9630  if(allocation != VK_NULL_HANDLE)
9631  {
9632  allocator->FreeMemory(allocation);
9633  }
9634 }
9635 
9636 VkResult vmaCreateImage(
9637  VmaAllocator allocator,
9638  const VkImageCreateInfo* pImageCreateInfo,
9639  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9640  VkImage* pImage,
9641  VmaAllocation* pAllocation,
9642  VmaAllocationInfo* pAllocationInfo)
9643 {
9644  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
9645 
9646  VMA_DEBUG_LOG("vmaCreateImage");
9647 
9648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9649 
9650  *pImage = VK_NULL_HANDLE;
9651  *pAllocation = VK_NULL_HANDLE;
9652 
9653  // 1. Create VkImage.
9654  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9655  allocator->m_hDevice,
9656  pImageCreateInfo,
9657  allocator->GetAllocationCallbacks(),
9658  pImage);
9659  if(res >= 0)
9660  {
9661  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9662  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9663  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9664 
9665  // 2. Allocate memory using allocator.
9666  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9667  if(res >= 0)
9668  {
9669  // 3. Bind image with memory.
9670  res = allocator->BindImageMemory(*pAllocation, *pImage);
9671  if(res >= 0)
9672  {
9673  // All steps succeeded.
9674  #if VMA_STATS_STRING_ENABLED
9675  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
9676  #endif
9677  if(pAllocationInfo != VMA_NULL)
9678  {
9679  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9680  }
9681  return VK_SUCCESS;
9682  }
9683  allocator->FreeMemory(*pAllocation);
9684  *pAllocation = VK_NULL_HANDLE;
9685  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9686  *pImage = VK_NULL_HANDLE;
9687  return res;
9688  }
9689  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9690  *pImage = VK_NULL_HANDLE;
9691  return res;
9692  }
9693  return res;
9694 }
9695 
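vmaCreateImage mirrors vmaCreateBuffer, with the image tiling selecting between the _IMAGE_OPTIMAL and _IMAGE_LINEAR suballocation types as the code above shows. A usage sketch with illustrative parameters, assuming a valid `allocator`:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // -> _IMAGE_OPTIMAL suballocation type
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &allocation, nullptr);
    if(res == VK_SUCCESS)
    {
        // ... use image ...
        vmaDestroyImage(allocator, image, allocation);
    }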
9696 void vmaDestroyImage(
9697  VmaAllocator allocator,
9698  VkImage image,
9699  VmaAllocation allocation)
9700 {
9701  VMA_ASSERT(allocator);
9702  VMA_DEBUG_LOG("vmaDestroyImage");
9703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9704  if(image != VK_NULL_HANDLE)
9705  {
9706  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9707  }
9708  if(allocation != VK_NULL_HANDLE)
9709  {
9710  allocator->FreeMemory(allocation);
9711  }
9712 }
9713 
9714 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1171
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1437
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1200
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents a single memory allocation.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1183
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1394
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1175
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1767
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1197
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to a different place.
Definition: vk_mem_alloc.h:1994
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1613
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1667
Definition: vk_mem_alloc.h:1474
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1164
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1512
Definition: vk_mem_alloc.h:1421
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1209
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1262
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1194
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1425
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1327
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1180
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1326
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1998
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1226
VmaStatInfo total
Definition: vk_mem_alloc.h:1336
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2006
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1496
VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1989
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1181
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1106
Represents the main object of this library, initialized by calling vmaCreateAllocator().
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1203
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameters of a VmaPool to be created.
Definition: vk_mem_alloc.h:1621
Definition: vk_mem_alloc.h:1615
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1777
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1176
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1533
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1637
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1673
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1162
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1624
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1372
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1984
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a VmaPool to be created.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2002
Definition: vk_mem_alloc.h:1411
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1520
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1179
Represents a custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1332
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1112
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1133
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1138
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2004
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1507
VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region.
Definition: vk_mem_alloc.h:1683
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as a string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1172
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1315
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1632
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1125
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1481
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1328
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1129
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1627
Definition: vk_mem_alloc.h:1420
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1178
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1502
Definition: vk_mem_alloc.h:1493
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1318
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1174
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1645
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1212
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1676
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1491
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1526
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1250
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1334
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1461
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1327
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1185
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1127
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1184
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1659
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1177
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1791
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1206
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1327
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1324
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1664
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1772
Definition: vk_mem_alloc.h:1489
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2000
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1170
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1322
Definition: vk_mem_alloc.h:1377
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1617
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1320
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1182
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1186
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1448
Definition: vk_mem_alloc.h:1404
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1786
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1160
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1173
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1753
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1595
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1328
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1335
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1670
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1328
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1758