Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;
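
/*
Illustrative sketch (not part of the original header): the callbacks above are
informational only - VMA invokes them after vkAllocateMemory and before
vkFreeMemory, e.g. for logging. Names like MyAllocCallback are hypothetical.

    static void VKAPI_PTR MyAllocCallback(
        VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }

    VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
    deviceMemoryCallbacks.pfnAllocate = MyAllocCallback;
    // Plug into VmaAllocatorCreateInfo::pDeviceMemoryCallbacks (see below).
*/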

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;
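
/*
Illustrative note (assumption, not original text): when the library is built
with VMA_STATIC_VULKAN_FUNCTIONS (the default when Vulkan prototypes are
available), this struct can be left out entirely. Otherwise fill every member
and pass it via VmaAllocatorCreateInfo::pVulkanFunctions:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    // ...and so on for each remaining member.
*/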

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;
    VkPhysicalDevice physicalDevice;
    VkDevice device;
    VkDeviceSize preferredLargeHeapBlockSize;
    const VkAllocationCallbacks* pAllocationCallbacks;
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* pHeapSizeLimit;
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);
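
/*
Illustrative usage sketch (assumes `physicalDevice` and `device` were created
by the application beforehand):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // Check res; when done, call vmaDestroyAllocator(allocator).
*/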

void vmaDestroyAllocator(
    VmaAllocator allocator);

void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);
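
/*
Illustrative sketch: picking a host-visible memory type for a staging buffer.
Passing UINT32_MAX as memoryTypeBits means "no restriction"; normally the mask
comes from vkGetBufferMemoryRequirements.

    VmaAllocationCreateInfo stagingCreateInfo = {};
    stagingCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &stagingCreateInfo, &memTypeIndex);
    // A non-success result means no memory type satisfied the request.
*/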

VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);
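
/*
Illustrative sketch: copying data into a host-visible allocation. Mapping in
VMA is internally reference-counted, so paired Map/Unmap calls may nest.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/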

void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);
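
/*
Illustrative sketch (hypothetical names): after defragmentation, any buffer or
image bound to an allocation that was moved must be destroyed, recreated, and
re-bound (e.g. with vmaBindBufferMemory below) before further use.

    VkBool32 allocationsChanged[ALLOCATION_COUNT]; // ALLOCATION_COUNT is hypothetical
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, ALLOCATION_COUNT,
        allocationsChanged, NULL, &stats); // NULL = default VmaDefragmentationInfo
*/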

VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
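
/*
Illustrative usage sketch: creating a GPU-only vertex buffer together with its
memory in a single call.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // Later: vmaDestroyBuffer(allocator, buffer, allocation);
*/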

void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures such as operator[].
// Making it non-empty can noticeably slow down the program.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
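// Worked example: with pageSize = 4096, a resource at offset 0 of size 4000
// ends on page 0 and a resource starting at offset 4096 begins on page 1, so
// the function returns false. If the second resource instead started at
// offset 4000, both would touch page 0 and the function would return true.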

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}
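
// Note: these two helpers carry the VMA_DEBUG_DETECT_CORRUPTION scheme: with
// VMA_DEBUG_MARGIN > 0, the margin around each allocation is filled with
// VMA_CORRUPTION_DETECTION_MAGIC_VALUE, and vmaCheckCorruption /
// vmaCheckPoolCorruption later verify those words are intact. A failed
// validation means something wrote outside the bounds of its allocation.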

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
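
// Illustrative example: in the sorted array {1, 3, 3, 7}, searching for key 3
// with cmp = operator< returns an iterator to index 1 (the first 3), while
// searching for key 4 returns index 3 - the position where 4 would be
// inserted to keep the array sorted.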

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const size_t indexToFind = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator) - vector.data();
    if(indexToFind < vector.size() &&
        !comparator(vector.data()[indexToFind], value) &&
        !comparator(value, vector.data()[indexToFind]))
    {
        return indexToFind;
    }
    else
    {
        return vector.size();
    }
}
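
// Illustrative note: together these helpers treat a vector as a sorted set.
// VmaVectorInsertSorted keeps it ordered on insert (binary search plus O(n)
// element move), while VmaVectorRemoveSorted and VmaVectorFindSorted reuse the
// same binary search and treat an element as matching when it compares equal
// to `value`, i.e. neither cmp(elem, value) nor cmp(value, elem) holds.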

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
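
// Illustrative note: each ItemBlock is an intrusive singly-linked free list -
// every free Item stores the index of the next free Item in NextFreeIndex,
// with UINT32_MAX as the terminator. CreateNewBlock links items 0..N-2 to
// their successors, so on a fresh block Alloc() pops item 0 and leaves
// FirstFreeIndex == 1; Free() pushes an item back in O(1).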

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be an unnecessary
    // computation just to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0
3969 
3971 
3972 class VmaDeviceMemoryBlock;
3973 
3974 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
3975 
3976 struct VmaAllocation_T
3977 {
3978  VMA_CLASS_NO_COPY(VmaAllocation_T)
3979 private:
3980  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3981 
3982  enum FLAGS
3983  {
3984  FLAG_USER_DATA_STRING = 0x01,
3985  };
3986 
3987 public:
3988  enum ALLOCATION_TYPE
3989  {
3990  ALLOCATION_TYPE_NONE,
3991  ALLOCATION_TYPE_BLOCK,
3992  ALLOCATION_TYPE_DEDICATED,
3993  };
3994 
3995  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3996  m_Alignment(1),
3997  m_Size(0),
3998  m_pUserData(VMA_NULL),
3999  m_LastUseFrameIndex(currentFrameIndex),
4000  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4001  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4002  m_MapCount(0),
4003  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4004  {
4005 #if VMA_STATS_STRING_ENABLED
4006  m_CreationFrameIndex = currentFrameIndex;
4007  m_BufferImageUsage = 0;
4008 #endif
4009  }
4010 
4011  ~VmaAllocation_T()
4012  {
4013  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4014 
4015  // Check if owned string was freed.
4016  VMA_ASSERT(m_pUserData == VMA_NULL);
4017  }
4018 
4019  void InitBlockAllocation(
4020  VmaPool hPool,
4021  VmaDeviceMemoryBlock* block,
4022  VkDeviceSize offset,
4023  VkDeviceSize alignment,
4024  VkDeviceSize size,
4025  VmaSuballocationType suballocationType,
4026  bool mapped,
4027  bool canBecomeLost)
4028  {
4029  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4030  VMA_ASSERT(block != VMA_NULL);
4031  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4032  m_Alignment = alignment;
4033  m_Size = size;
4034  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4035  m_SuballocationType = (uint8_t)suballocationType;
4036  m_BlockAllocation.m_hPool = hPool;
4037  m_BlockAllocation.m_Block = block;
4038  m_BlockAllocation.m_Offset = offset;
4039  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4040  }
4041 
4042  void InitLost()
4043  {
4044  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4045  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4046  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4047  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4048  m_BlockAllocation.m_Block = VMA_NULL;
4049  m_BlockAllocation.m_Offset = 0;
4050  m_BlockAllocation.m_CanBecomeLost = true;
4051  }
4052 
4053  void ChangeBlockAllocation(
4054  VmaAllocator hAllocator,
4055  VmaDeviceMemoryBlock* block,
4056  VkDeviceSize offset);
4057 
4058  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4059  void InitDedicatedAllocation(
4060  uint32_t memoryTypeIndex,
4061  VkDeviceMemory hMemory,
4062  VmaSuballocationType suballocationType,
4063  void* pMappedData,
4064  VkDeviceSize size)
4065  {
4066  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4067  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4068  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4069  m_Alignment = 0;
4070  m_Size = size;
4071  m_SuballocationType = (uint8_t)suballocationType;
4072  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4073  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4074  m_DedicatedAllocation.m_hMemory = hMemory;
4075  m_DedicatedAllocation.m_pMappedData = pMappedData;
4076  }
4077 
4078  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4079  VkDeviceSize GetAlignment() const { return m_Alignment; }
4080  VkDeviceSize GetSize() const { return m_Size; }
4081  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4082  void* GetUserData() const { return m_pUserData; }
4083  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4084  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4085 
4086  VmaDeviceMemoryBlock* GetBlock() const
4087  {
4088  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4089  return m_BlockAllocation.m_Block;
4090  }
4091  VkDeviceSize GetOffset() const;
4092  VkDeviceMemory GetMemory() const;
4093  uint32_t GetMemoryTypeIndex() const;
4094  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4095  void* GetMappedData() const;
4096  bool CanBecomeLost() const;
4097  VmaPool GetPool() const;
4098 
4099  uint32_t GetLastUseFrameIndex() const
4100  {
4101  return m_LastUseFrameIndex.load();
4102  }
4103  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4104  {
4105  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4106  }
4107  /*
4108  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4109  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4110  - Else, returns false.
4111 
4112  If hAllocation is already lost, assert - you should not call it then.
4113  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4114  */
4115  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
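
 /*
 Illustrative sketch (not part of the library): the decision that MakeLost()
 makes, written out as standalone arithmetic. The helper name is hypothetical.

   bool WouldBecomeLost(uint32_t lastUseFrameIndex,
       uint32_t frameInUseCount,
       uint32_t currentFrameIndex)
   {
       // An allocation may be abandoned only when its last recorded use is
       // older than the window of frames the GPU may still be using.
       return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
   }

 For example, with frameInUseCount = 2, an allocation last used in frame 10
 becomes eligible to be lost starting from frame 13.
 */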
4116 
4117  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4118  {
4119  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4120  outInfo.blockCount = 1;
4121  outInfo.allocationCount = 1;
4122  outInfo.unusedRangeCount = 0;
4123  outInfo.usedBytes = m_Size;
4124  outInfo.unusedBytes = 0;
4125  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4126  outInfo.unusedRangeSizeMin = UINT64_MAX;
4127  outInfo.unusedRangeSizeMax = 0;
4128  }
4129 
4130  void BlockAllocMap();
4131  void BlockAllocUnmap();
4132  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4133  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4134 
4135 #if VMA_STATS_STRING_ENABLED
4136  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4137  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4138 
4139  void InitBufferImageUsage(uint32_t bufferImageUsage)
4140  {
4141  VMA_ASSERT(m_BufferImageUsage == 0);
4142  m_BufferImageUsage = bufferImageUsage;
4143  }
4144 
4145  void PrintParameters(class VmaJsonWriter& json) const;
4146 #endif
4147 
4148 private:
4149  VkDeviceSize m_Alignment;
4150  VkDeviceSize m_Size;
4151  void* m_pUserData;
4152  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4153  uint8_t m_Type; // ALLOCATION_TYPE
4154  uint8_t m_SuballocationType; // VmaSuballocationType
4155  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4156  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4157  uint8_t m_MapCount;
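 // Illustrative decoding of the two fields packed into m_MapCount
 // (hypothetical locals, not members of this class):
 //   uint8_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP; // 0..0x7F
 //   bool persistentlyMapped = (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0;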
4158  uint8_t m_Flags; // enum FLAGS
4159 
4160  // Allocation out of VmaDeviceMemoryBlock.
4161  struct BlockAllocation
4162  {
4163  VmaPool m_hPool; // Null if belongs to general memory.
4164  VmaDeviceMemoryBlock* m_Block;
4165  VkDeviceSize m_Offset;
4166  bool m_CanBecomeLost;
4167  };
4168 
4169  // Allocation for an object that has its own private VkDeviceMemory.
4170  struct DedicatedAllocation
4171  {
4172  uint32_t m_MemoryTypeIndex;
4173  VkDeviceMemory m_hMemory;
4174  void* m_pMappedData; // Not null means memory is mapped.
4175  };
4176 
4177  union
4178  {
4179  // Allocation out of VmaDeviceMemoryBlock.
4180  BlockAllocation m_BlockAllocation;
4181  // Allocation for an object that has its own private VkDeviceMemory.
4182  DedicatedAllocation m_DedicatedAllocation;
4183  };
4184 
4185 #if VMA_STATS_STRING_ENABLED
4186  uint32_t m_CreationFrameIndex;
4187  uint32_t m_BufferImageUsage; // 0 if unknown.
4188 #endif
4189 
4190  void FreeUserDataString(VmaAllocator hAllocator);
4191 };
4192 
4193 /*
4194 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4195 allocated memory block or free.
4196 */
4197 struct VmaSuballocation
4198 {
4199  VkDeviceSize offset;
4200  VkDeviceSize size;
4201  VmaAllocation hAllocation;
4202  VmaSuballocationType type;
4203 };
4204 
4205 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
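
/*
Illustrative layout (example data, not code): a 4 MiB block holding two live
allocations A and B could be described by the suballocation list

  { offset = 0,     size = 1 MiB, type = BUFFER,        hAllocation = A },
  { offset = 1 MiB, size = 2 MiB, type = FREE,          hAllocation = VK_NULL_HANDLE },
  { offset = 3 MiB, size = 1 MiB, type = IMAGE_OPTIMAL, hAllocation = B }

Offsets are contiguous and sizes sum to the block size - exactly the invariant
that VmaBlockMetadata::Validate() checks further below.
*/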
4206 
4207 // Cost of one additional lost allocation, expressed as an equivalent number of bytes.
4208 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4209 
4210 /*
4211 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4212 
4213 If canMakeOtherLost was false:
4214 - item points to a FREE suballocation.
4215 - itemsToMakeLostCount is 0.
4216 
4217 If canMakeOtherLost was true:
4218 - item points to first of sequence of suballocations, which are either FREE,
4219  or point to VmaAllocations that can become lost.
4220 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4221  the requested allocation to succeed.
4222 */
4223 struct VmaAllocationRequest
4224 {
4225  VkDeviceSize offset;
4226  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4227  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4228  VmaSuballocationList::iterator item;
4229  size_t itemsToMakeLostCount;
4230 
4231  VkDeviceSize CalcCost() const
4232  {
4233  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4234  }
4235 };
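
/*
Worked example (illustrative numbers): with VMA_LOST_ALLOCATION_COST = 1 MiB,
a request that overlaps 256 KiB of existing items and would make 2 of them
lost has CalcCost() = 262144 + 2 * 1048576 = 2359296 bytes. A request that
fits entirely into free space (sumItemSize == 0, itemsToMakeLostCount == 0)
costs 0, so comparing costs naturally prefers plans that evict nothing.
*/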
4236 
4237 /*
4238 Data structure used for bookkeeping of allocations and unused ranges of memory
4239 in a single VkDeviceMemory block.
4240 */
4241 class VmaBlockMetadata
4242 {
4243  VMA_CLASS_NO_COPY(VmaBlockMetadata)
4244 public:
4245  VmaBlockMetadata(VmaAllocator hAllocator);
4246  ~VmaBlockMetadata();
4247  void Init(VkDeviceSize size);
4248 
4249  // Validates all data structures inside this object. If not valid, returns false.
4250  bool Validate() const;
4251  VkDeviceSize GetSize() const { return m_Size; }
4252  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4253  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4254  VkDeviceSize GetUnusedRangeSizeMax() const;
4255  // Returns true if this block is empty - contains only a single free suballocation.
4256  bool IsEmpty() const;
4257 
4258  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4259  void AddPoolStats(VmaPoolStats& inoutStats) const;
4260 
4261 #if VMA_STATS_STRING_ENABLED
4262  void PrintDetailedMap(class VmaJsonWriter& json) const;
4263 #endif
4264 
4265  // Tries to find a place for a suballocation with the given parameters inside this block.
4266  // If it succeeds, fills pAllocationRequest and returns true.
4267  // If it fails, returns false.
4268  bool CreateAllocationRequest(
4269  uint32_t currentFrameIndex,
4270  uint32_t frameInUseCount,
4271  VkDeviceSize bufferImageGranularity,
4272  VkDeviceSize allocSize,
4273  VkDeviceSize allocAlignment,
4274  VmaSuballocationType allocType,
4275  bool canMakeOtherLost,
4276  VmaAllocationRequest* pAllocationRequest);
4277 
4278  bool MakeRequestedAllocationsLost(
4279  uint32_t currentFrameIndex,
4280  uint32_t frameInUseCount,
4281  VmaAllocationRequest* pAllocationRequest);
4282 
4283  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4284 
4285  VkResult CheckCorruption(const void* pBlockData);
4286 
4287  // Makes actual allocation based on request. Request must already be checked and valid.
4288  void Alloc(
4289  const VmaAllocationRequest& request,
4290  VmaSuballocationType type,
4291  VkDeviceSize allocSize,
4292  VmaAllocation hAllocation);
4293 
4294  // Frees suballocation assigned to given memory region.
4295  void Free(const VmaAllocation allocation);
4296  void FreeAtOffset(VkDeviceSize offset);
4297 
4298 private:
4299  VkDeviceSize m_Size;
4300  uint32_t m_FreeCount;
4301  VkDeviceSize m_SumFreeSize;
4302  VmaSuballocationList m_Suballocations;
4303  // Suballocations that are free and have size greater than a certain threshold.
4304  // Sorted by size, ascending.
4305  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4306 
4307  bool ValidateFreeSuballocationList() const;
4308 
4309  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
4310  // If yes, fills pOffset and returns true. If no, returns false.
4311  bool CheckAllocation(
4312  uint32_t currentFrameIndex,
4313  uint32_t frameInUseCount,
4314  VkDeviceSize bufferImageGranularity,
4315  VkDeviceSize allocSize,
4316  VkDeviceSize allocAlignment,
4317  VmaSuballocationType allocType,
4318  VmaSuballocationList::const_iterator suballocItem,
4319  bool canMakeOtherLost,
4320  VkDeviceSize* pOffset,
4321  size_t* itemsToMakeLostCount,
4322  VkDeviceSize* pSumFreeSize,
4323  VkDeviceSize* pSumItemSize) const;
4324  // Given a free suballocation, merges it with the following one, which must also be free.
4325  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4326  // Releases the given suballocation, making it free.
4327  // Merges it with adjacent free suballocations if applicable.
4328  // Returns an iterator to the new free suballocation at this place.
4329  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4330  // Given a free suballocation, inserts it into the sorted list
4331  // m_FreeSuballocationsBySize if it qualifies by size.
4332  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4333  // Given a free suballocation, removes it from the sorted list
4334  // m_FreeSuballocationsBySize if it qualifies by size.
4335  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4336 };
4337 
4338 /*
4339 Represents a single block of device memory (`VkDeviceMemory`) with all the
4340 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4341 
4342 Thread-safety: This class must be externally synchronized.
4343 */
4344 class VmaDeviceMemoryBlock
4345 {
4346  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
4347 public:
4348  VmaBlockMetadata m_Metadata;
4349 
4350  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
4351 
4352  ~VmaDeviceMemoryBlock()
4353  {
4354  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
4355  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4356  }
4357 
4358  // Always call after construction.
4359  void Init(
4360  uint32_t newMemoryTypeIndex,
4361  VkDeviceMemory newMemory,
4362  VkDeviceSize newSize,
4363  uint32_t id);
4364  // Always call before destruction.
4365  void Destroy(VmaAllocator allocator);
4366 
4367  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
4368  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4369  uint32_t GetId() const { return m_Id; }
4370  void* GetMappedData() const { return m_pMappedData; }
4371 
4372  // Validates all data structures inside this object. If not valid, returns false.
4373  bool Validate() const;
4374 
4375  VkResult CheckCorruption(VmaAllocator hAllocator);
4376 
4377  // ppData can be null.
4378  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
4379  void Unmap(VmaAllocator hAllocator, uint32_t count);
4380 
4381  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
4382  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
4383 
4384  VkResult BindBufferMemory(
4385  const VmaAllocator hAllocator,
4386  const VmaAllocation hAllocation,
4387  VkBuffer hBuffer);
4388  VkResult BindImageMemory(
4389  const VmaAllocator hAllocator,
4390  const VmaAllocation hAllocation,
4391  VkImage hImage);
4392 
4393 private:
4394  uint32_t m_MemoryTypeIndex;
4395  uint32_t m_Id;
4396  VkDeviceMemory m_hMemory;
4397 
4398  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
4399  // Also protects m_MapCount, m_pMappedData.
4400  VMA_MUTEX m_Mutex;
4401  uint32_t m_MapCount;
4402  void* m_pMappedData;
4403 };
4404 
4405 struct VmaPointerLess
4406 {
4407  bool operator()(const void* lhs, const void* rhs) const
4408  {
4409  return lhs < rhs;
4410  }
4411 };
4412 
4413 class VmaDefragmentator;
4414 
4415 /*
4416 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4417 Vulkan memory type.
4418 
4419 Synchronized internally with a mutex.
4420 */
4421 struct VmaBlockVector
4422 {
4423  VMA_CLASS_NO_COPY(VmaBlockVector)
4424 public:
4425  VmaBlockVector(
4426  VmaAllocator hAllocator,
4427  uint32_t memoryTypeIndex,
4428  VkDeviceSize preferredBlockSize,
4429  size_t minBlockCount,
4430  size_t maxBlockCount,
4431  VkDeviceSize bufferImageGranularity,
4432  uint32_t frameInUseCount,
4433  bool isCustomPool);
4434  ~VmaBlockVector();
4435 
4436  VkResult CreateMinBlocks();
4437 
4438  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4439  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
4440  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
4441  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
4442 
4443  void GetPoolStats(VmaPoolStats* pStats);
4444 
4445  bool IsEmpty() const { return m_Blocks.empty(); }
4446  bool IsCorruptionDetectionEnabled() const;
4447 
4448  VkResult Allocate(
4449  VmaPool hCurrentPool,
4450  uint32_t currentFrameIndex,
4451  VkDeviceSize size,
4452  VkDeviceSize alignment,
4453  const VmaAllocationCreateInfo& createInfo,
4454  VmaSuballocationType suballocType,
4455  VmaAllocation* pAllocation);
4456 
4457  void Free(
4458  VmaAllocation hAllocation);
4459 
4460  // Adds statistics of this BlockVector to pStats.
4461  void AddStats(VmaStats* pStats);
4462 
4463 #if VMA_STATS_STRING_ENABLED
4464  void PrintDetailedMap(class VmaJsonWriter& json);
4465 #endif
4466 
4467  void MakePoolAllocationsLost(
4468  uint32_t currentFrameIndex,
4469  size_t* pLostAllocationCount);
4470  VkResult CheckCorruption();
4471 
4472  VmaDefragmentator* EnsureDefragmentator(
4473  VmaAllocator hAllocator,
4474  uint32_t currentFrameIndex);
4475 
4476  VkResult Defragment(
4477  VmaDefragmentationStats* pDefragmentationStats,
4478  VkDeviceSize& maxBytesToMove,
4479  uint32_t& maxAllocationsToMove);
4480 
4481  void DestroyDefragmentator();
4482 
4483 private:
4484  friend class VmaDefragmentator;
4485 
4486  const VmaAllocator m_hAllocator;
4487  const uint32_t m_MemoryTypeIndex;
4488  const VkDeviceSize m_PreferredBlockSize;
4489  const size_t m_MinBlockCount;
4490  const size_t m_MaxBlockCount;
4491  const VkDeviceSize m_BufferImageGranularity;
4492  const uint32_t m_FrameInUseCount;
4493  const bool m_IsCustomPool;
4494  VMA_MUTEX m_Mutex;
4495  // Incrementally sorted by sumFreeSize, ascending.
4496  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
4497  /* There can be at most one block that is completely empty - a
4498  hysteresis to avoid the pessimistic case of alternating creation and destruction
4499  of a VkDeviceMemory. */
4500  bool m_HasEmptyBlock;
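 // Example of the churn this avoids: a workload that repeatedly frees the only
 // allocation in a block and immediately allocates again would otherwise
 // destroy and re-create a whole VkDeviceMemory on every iteration.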
4501  VmaDefragmentator* m_pDefragmentator;
4502  uint32_t m_NextBlockId;
4503 
4504  VkDeviceSize CalcMaxBlockSize() const;
4505 
4506  // Finds and removes given block from vector.
4507  void Remove(VmaDeviceMemoryBlock* pBlock);
4508 
4509  // Performs a single step in sorting m_Blocks. They may not be fully sorted
4510  // after this call.
4511  void IncrementallySortBlocks();
4512 
4513  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4514 };
4515 
4516 struct VmaPool_T
4517 {
4518  VMA_CLASS_NO_COPY(VmaPool_T)
4519 public:
4520  VmaBlockVector m_BlockVector;
4521 
4522  VmaPool_T(
4523  VmaAllocator hAllocator,
4524  const VmaPoolCreateInfo& createInfo);
4525  ~VmaPool_T();
4526 
4527  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4528  uint32_t GetId() const { return m_Id; }
4529  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
4530 
4531 #if VMA_STATS_STRING_ENABLED
4532  //void PrintDetailedMap(class VmaStringBuilder& sb);
4533 #endif
4534 
4535 private:
4536  uint32_t m_Id;
4537 };
4538 
4539 class VmaDefragmentator
4540 {
4541  VMA_CLASS_NO_COPY(VmaDefragmentator)
4542 private:
4543  const VmaAllocator m_hAllocator;
4544  VmaBlockVector* const m_pBlockVector;
4545  uint32_t m_CurrentFrameIndex;
4546  VkDeviceSize m_BytesMoved;
4547  uint32_t m_AllocationsMoved;
4548 
4549  struct AllocationInfo
4550  {
4551  VmaAllocation m_hAllocation;
4552  VkBool32* m_pChanged;
4553 
4554  AllocationInfo() :
4555  m_hAllocation(VK_NULL_HANDLE),
4556  m_pChanged(VMA_NULL)
4557  {
4558  }
4559  };
4560 
4561  struct AllocationInfoSizeGreater
4562  {
4563  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4564  {
4565  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4566  }
4567  };
4568 
4569  // Used between AddAllocation and Defragment.
4570  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4571 
4572  struct BlockInfo
4573  {
4574  VmaDeviceMemoryBlock* m_pBlock;
4575  bool m_HasNonMovableAllocations;
4576  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4577 
4578  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4579  m_pBlock(VMA_NULL),
4580  m_HasNonMovableAllocations(true),
4581  m_Allocations(pAllocationCallbacks),
4582  m_pMappedDataForDefragmentation(VMA_NULL)
4583  {
4584  }
4585 
4586  void CalcHasNonMovableAllocations()
4587  {
4588  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4589  const size_t defragmentAllocCount = m_Allocations.size();
4590  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4591  }
4592 
4593  void SortAllocationsBySizeDescecnding()
4594  {
4595  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4596  }
4597 
4598  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4599  void Unmap(VmaAllocator hAllocator);
4600 
4601  private:
4602  // Not null if mapped for defragmentation only, not originally mapped.
4603  void* m_pMappedDataForDefragmentation;
4604  };
4605 
4606  struct BlockPointerLess
4607  {
4608  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4609  {
4610  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4611  }
4612  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4613  {
4614  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4615  }
4616  };
4617 
4618  // 1. Blocks with some non-movable allocations go first.
4619  // 2. Blocks with smaller sumFreeSize go first.
4620  struct BlockInfoCompareMoveDestination
4621  {
4622  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4623  {
4624  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4625  {
4626  return true;
4627  }
4628  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4629  {
4630  return false;
4631  }
4632  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4633  {
4634  return true;
4635  }
4636  return false;
4637  }
4638  };
4639 
4640  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4641  BlockInfoVector m_Blocks;
4642 
4643  VkResult DefragmentRound(
4644  VkDeviceSize maxBytesToMove,
4645  uint32_t maxAllocationsToMove);
4646 
4647  static bool MoveMakesSense(
4648  size_t dstBlockIndex, VkDeviceSize dstOffset,
4649  size_t srcBlockIndex, VkDeviceSize srcOffset);
4650 
4651 public:
4652  VmaDefragmentator(
4653  VmaAllocator hAllocator,
4654  VmaBlockVector* pBlockVector,
4655  uint32_t currentFrameIndex);
4656 
4657  ~VmaDefragmentator();
4658 
4659  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4660  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4661 
4662  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4663 
4664  VkResult Defragment(
4665  VkDeviceSize maxBytesToMove,
4666  uint32_t maxAllocationsToMove);
4667 };
4668 
4669 // Main allocator object.
4670 struct VmaAllocator_T
4671 {
4672  VMA_CLASS_NO_COPY(VmaAllocator_T)
4673 public:
4674  bool m_UseMutex;
4675  bool m_UseKhrDedicatedAllocation;
4676  VkDevice m_hDevice;
4677  bool m_AllocationCallbacksSpecified;
4678  VkAllocationCallbacks m_AllocationCallbacks;
4679  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4680 
4681  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4682  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4683  VMA_MUTEX m_HeapSizeLimitMutex;
4684 
4685  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4686  VkPhysicalDeviceMemoryProperties m_MemProps;
4687 
4688  // Default pools.
4689  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4690 
4691  // Each vector is sorted by memory (handle value).
4692  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4693  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4694  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4695 
4696  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4697  ~VmaAllocator_T();
4698 
4699  const VkAllocationCallbacks* GetAllocationCallbacks() const
4700  {
4701  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4702  }
4703  const VmaVulkanFunctions& GetVulkanFunctions() const
4704  {
4705  return m_VulkanFunctions;
4706  }
4707 
4708  VkDeviceSize GetBufferImageGranularity() const
4709  {
4710  return VMA_MAX(
4711  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4712  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4713  }
4714 
4715  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4716  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4717 
4718  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4719  {
4720  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4721  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4722  }
4723  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
4724  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
4725  {
4726  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
4727  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
4728  }
4729  // Minimum alignment for all allocations in specific memory type.
4730  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
4731  {
4732  return IsMemoryTypeNonCoherent(memTypeIndex) ?
4733  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
4734  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
4735  }
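 // Worked example (illustrative): on a device with nonCoherentAtomSize = 64
 // and VMA_DEBUG_ALIGNMENT = 1, allocations in a HOST_VISIBLE but not
 // HOST_COHERENT memory type get alignment max(1, 64) = 64, so explicit
 // flush/invalidate ranges can be rounded to whole atoms without overlapping
 // neighboring allocations.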
4736 
4737  bool IsIntegratedGpu() const
4738  {
4739  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
4740  }
4741 
4742  void GetBufferMemoryRequirements(
4743  VkBuffer hBuffer,
4744  VkMemoryRequirements& memReq,
4745  bool& requiresDedicatedAllocation,
4746  bool& prefersDedicatedAllocation) const;
4747  void GetImageMemoryRequirements(
4748  VkImage hImage,
4749  VkMemoryRequirements& memReq,
4750  bool& requiresDedicatedAllocation,
4751  bool& prefersDedicatedAllocation) const;
4752 
4753  // Main allocation function.
4754  VkResult AllocateMemory(
4755  const VkMemoryRequirements& vkMemReq,
4756  bool requiresDedicatedAllocation,
4757  bool prefersDedicatedAllocation,
4758  VkBuffer dedicatedBuffer,
4759  VkImage dedicatedImage,
4760  const VmaAllocationCreateInfo& createInfo,
4761  VmaSuballocationType suballocType,
4762  VmaAllocation* pAllocation);
4763 
4764  // Main deallocation function.
4765  void FreeMemory(const VmaAllocation allocation);
4766 
4767  void CalculateStats(VmaStats* pStats);
4768 
4769 #if VMA_STATS_STRING_ENABLED
4770  void PrintDetailedMap(class VmaJsonWriter& json);
4771 #endif
4772 
4773  VkResult Defragment(
4774  VmaAllocation* pAllocations,
4775  size_t allocationCount,
4776  VkBool32* pAllocationsChanged,
4777  const VmaDefragmentationInfo* pDefragmentationInfo,
4778  VmaDefragmentationStats* pDefragmentationStats);
4779 
4780  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4781  bool TouchAllocation(VmaAllocation hAllocation);
4782 
4783  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4784  void DestroyPool(VmaPool pool);
4785  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4786 
4787  void SetCurrentFrameIndex(uint32_t frameIndex);
4788 
4789  void MakePoolAllocationsLost(
4790  VmaPool hPool,
4791  size_t* pLostAllocationCount);
4792  VkResult CheckPoolCorruption(VmaPool hPool);
4793  VkResult CheckCorruption(uint32_t memoryTypeBits);
4794 
4795  void CreateLostAllocation(VmaAllocation* pAllocation);
4796 
4797  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4798  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4799 
4800  VkResult Map(VmaAllocation hAllocation, void** ppData);
4801  void Unmap(VmaAllocation hAllocation);
4802 
4803  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
4804  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
4805 
4806  void FlushOrInvalidateAllocation(
4807  VmaAllocation hAllocation,
4808  VkDeviceSize offset, VkDeviceSize size,
4809  VMA_CACHE_OPERATION op);
4810 
4811  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
4812 
4813 private:
4814  VkDeviceSize m_PreferredLargeHeapBlockSize;
4815 
4816  VkPhysicalDevice m_PhysicalDevice;
4817  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4818 
4819  VMA_MUTEX m_PoolsMutex;
4820  // Protected by m_PoolsMutex. Sorted by pointer value.
4821  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4822  uint32_t m_NextPoolId;
4823 
4824  VmaVulkanFunctions m_VulkanFunctions;
4825 
4826  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4827 
4828  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4829 
4830  VkResult AllocateMemoryOfType(
4831  VkDeviceSize size,
4832  VkDeviceSize alignment,
4833  bool dedicatedAllocation,
4834  VkBuffer dedicatedBuffer,
4835  VkImage dedicatedImage,
4836  const VmaAllocationCreateInfo& createInfo,
4837  uint32_t memTypeIndex,
4838  VmaSuballocationType suballocType,
4839  VmaAllocation* pAllocation);
4840 
4841  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4842  VkResult AllocateDedicatedMemory(
4843  VkDeviceSize size,
4844  VmaSuballocationType suballocType,
4845  uint32_t memTypeIndex,
4846  bool map,
4847  bool isUserDataString,
4848  void* pUserData,
4849  VkBuffer dedicatedBuffer,
4850  VkImage dedicatedImage,
4851  VmaAllocation* pAllocation);
4852 
4853  // Frees the given allocation, which must be a dedicated allocation: unregisters it and destroys its VkDeviceMemory.
4854  void FreeDedicatedMemory(VmaAllocation allocation);
4855 };
4856 
4858 // Memory allocation #2 after VmaAllocator_T definition
4859 
4860 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4861 {
4862  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4863 }
4864 
4865 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4866 {
4867  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4868 }
4869 
4870 template<typename T>
4871 static T* VmaAllocate(VmaAllocator hAllocator)
4872 {
4873  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4874 }
4875 
4876 template<typename T>
4877 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4878 {
4879  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4880 }
4881 
4882 template<typename T>
4883 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4884 {
4885  if(ptr != VMA_NULL)
4886  {
4887  ptr->~T();
4888  VmaFree(hAllocator, ptr);
4889  }
4890 }
4891 
4892 template<typename T>
4893 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4894 {
4895  if(ptr != VMA_NULL)
4896  {
4897  for(size_t i = count; i--; )
4898  ptr[i].~T();
4899  VmaFree(hAllocator, ptr);
4900  }
4901 }
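
/*
Usage sketch (illustrative, hypothetical type Foo): one intended pairing of
the helpers above - placement-new into storage from VmaAllocate, then
vma_delete to run the destructor and release the storage.

  struct Foo { int x; };
  Foo* p = new(VmaAllocate<Foo>(hAllocator)) Foo(); // requires <new>
  // ... use p ...
  vma_delete(hAllocator, p); // calls ~Foo(), then VmaFree
*/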
4902 
4904 // VmaStringBuilder
4905 
4906 #if VMA_STATS_STRING_ENABLED
4907 
4908 class VmaStringBuilder
4909 {
4910 public:
4911  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4912  size_t GetLength() const { return m_Data.size(); }
4913  const char* GetData() const { return m_Data.data(); }
4914 
4915  void Add(char ch) { m_Data.push_back(ch); }
4916  void Add(const char* pStr);
4917  void AddNewLine() { Add('\n'); }
4918  void AddNumber(uint32_t num);
4919  void AddNumber(uint64_t num);
4920  void AddPointer(const void* ptr);
4921 
4922 private:
4923  VmaVector< char, VmaStlAllocator<char> > m_Data;
4924 };
4925 
4926 void VmaStringBuilder::Add(const char* pStr)
4927 {
4928  const size_t strLen = strlen(pStr);
4929  if(strLen > 0)
4930  {
4931  const size_t oldCount = m_Data.size();
4932  m_Data.resize(oldCount + strLen);
4933  memcpy(m_Data.data() + oldCount, pStr, strLen);
4934  }
4935 }
4936 
4937 void VmaStringBuilder::AddNumber(uint32_t num)
4938 {
4939  char buf[11];
4940  VmaUint32ToStr(buf, sizeof(buf), num);
4941  Add(buf);
4942 }
4943 
4944 void VmaStringBuilder::AddNumber(uint64_t num)
4945 {
4946  char buf[21];
4947  VmaUint64ToStr(buf, sizeof(buf), num);
4948  Add(buf);
4949 }
4950 
4951 void VmaStringBuilder::AddPointer(const void* ptr)
4952 {
4953  char buf[21];
4954  VmaPtrToStr(buf, sizeof(buf), ptr);
4955  Add(buf);
4956 }
4957 
4958 #endif // #if VMA_STATS_STRING_ENABLED
4959 
4961 // VmaJsonWriter
4962 
4963 #if VMA_STATS_STRING_ENABLED
4964 
4965 class VmaJsonWriter
4966 {
4967  VMA_CLASS_NO_COPY(VmaJsonWriter)
4968 public:
4969  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4970  ~VmaJsonWriter();
4971 
4972  void BeginObject(bool singleLine = false);
4973  void EndObject();
4974 
4975  void BeginArray(bool singleLine = false);
4976  void EndArray();
4977 
4978  void WriteString(const char* pStr);
4979  void BeginString(const char* pStr = VMA_NULL);
4980  void ContinueString(const char* pStr);
4981  void ContinueString(uint32_t n);
4982  void ContinueString(uint64_t n);
4983  void ContinueString_Pointer(const void* ptr);
4984  void EndString(const char* pStr = VMA_NULL);
4985 
4986  void WriteNumber(uint32_t n);
4987  void WriteNumber(uint64_t n);
4988  void WriteBool(bool b);
4989  void WriteNull();
4990 
4991 private:
4992  static const char* const INDENT;
4993 
4994  enum COLLECTION_TYPE
4995  {
4996  COLLECTION_TYPE_OBJECT,
4997  COLLECTION_TYPE_ARRAY,
4998  };
4999  struct StackItem
5000  {
5001  COLLECTION_TYPE type;
5002  uint32_t valueCount;
5003  bool singleLineMode;
5004  };
5005 
5006  VmaStringBuilder& m_SB;
5007  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
5008  bool m_InsideString;
5009 
5010  void BeginValue(bool isString);
5011  void WriteIndent(bool oneLess = false);
5012 };
5013 
5014 const char* const VmaJsonWriter::INDENT = " ";
5015 
5016 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
5017  m_SB(sb),
5018  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
5019  m_InsideString(false)
5020 {
5021 }
5022 
5023 VmaJsonWriter::~VmaJsonWriter()
5024 {
5025  VMA_ASSERT(!m_InsideString);
5026  VMA_ASSERT(m_Stack.empty());
5027 }
5028 
5029 void VmaJsonWriter::BeginObject(bool singleLine)
5030 {
5031  VMA_ASSERT(!m_InsideString);
5032 
5033  BeginValue(false);
5034  m_SB.Add('{');
5035 
5036  StackItem item;
5037  item.type = COLLECTION_TYPE_OBJECT;
5038  item.valueCount = 0;
5039  item.singleLineMode = singleLine;
5040  m_Stack.push_back(item);
5041 }
5042 
5043 void VmaJsonWriter::EndObject()
5044 {
5045  VMA_ASSERT(!m_InsideString);
5046 
5047  WriteIndent(true);
5048  m_SB.Add('}');
5049 
5050  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
5051  m_Stack.pop_back();
5052 }
5053 
5054 void VmaJsonWriter::BeginArray(bool singleLine)
5055 {
5056  VMA_ASSERT(!m_InsideString);
5057 
5058  BeginValue(false);
5059  m_SB.Add('[');
5060 
5061  StackItem item;
5062  item.type = COLLECTION_TYPE_ARRAY;
5063  item.valueCount = 0;
5064  item.singleLineMode = singleLine;
5065  m_Stack.push_back(item);
5066 }
5067 
5068 void VmaJsonWriter::EndArray()
5069 {
5070  VMA_ASSERT(!m_InsideString);
5071 
5072  WriteIndent(true);
5073  m_SB.Add(']');
5074 
5075  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
5076  m_Stack.pop_back();
5077 }
5078 
5079 void VmaJsonWriter::WriteString(const char* pStr)
5080 {
5081  BeginString(pStr);
5082  EndString();
5083 }
5084 
5085 void VmaJsonWriter::BeginString(const char* pStr)
5086 {
5087  VMA_ASSERT(!m_InsideString);
5088 
5089  BeginValue(true);
5090  m_SB.Add('"');
5091  m_InsideString = true;
5092  if(pStr != VMA_NULL && pStr[0] != '\0')
5093  {
5094  ContinueString(pStr);
5095  }
5096 }
5097 
5098 void VmaJsonWriter::ContinueString(const char* pStr)
5099 {
5100  VMA_ASSERT(m_InsideString);
5101 
5102  const size_t strLen = strlen(pStr);
5103  for(size_t i = 0; i < strLen; ++i)
5104  {
5105  char ch = pStr[i];
5106  if(ch == '\\')
5107  {
5108  m_SB.Add("\\\\");
5109  }
5110  else if(ch == '"')
5111  {
5112  m_SB.Add("\\\"");
5113  }
5114  else if(ch >= 32)
5115  {
5116  m_SB.Add(ch);
5117  }
5118  else switch(ch)
5119  {
5120  case '\b':
5121  m_SB.Add("\\b");
5122  break;
5123  case '\f':
5124  m_SB.Add("\\f");
5125  break;
5126  case '\n':
5127  m_SB.Add("\\n");
5128  break;
5129  case '\r':
5130  m_SB.Add("\\r");
5131  break;
5132  case '\t':
5133  m_SB.Add("\\t");
5134  break;
5135  default:
5136  VMA_ASSERT(0 && "Character not currently supported.");
5137  break;
5138  }
5139  }
5140 }
5141 
5142 void VmaJsonWriter::ContinueString(uint32_t n)
5143 {
5144  VMA_ASSERT(m_InsideString);
5145  m_SB.AddNumber(n);
5146 }
5147 
5148 void VmaJsonWriter::ContinueString(uint64_t n)
5149 {
5150  VMA_ASSERT(m_InsideString);
5151  m_SB.AddNumber(n);
5152 }
5153 
5154 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
5155 {
5156  VMA_ASSERT(m_InsideString);
5157  m_SB.AddPointer(ptr);
5158 }
5159 
5160 void VmaJsonWriter::EndString(const char* pStr)
5161 {
5162  VMA_ASSERT(m_InsideString);
5163  if(pStr != VMA_NULL && pStr[0] != '\0')
5164  {
5165  ContinueString(pStr);
5166  }
5167  m_SB.Add('"');
5168  m_InsideString = false;
5169 }
5170 
5171 void VmaJsonWriter::WriteNumber(uint32_t n)
5172 {
5173  VMA_ASSERT(!m_InsideString);
5174  BeginValue(false);
5175  m_SB.AddNumber(n);
5176 }
5177 
5178 void VmaJsonWriter::WriteNumber(uint64_t n)
5179 {
5180  VMA_ASSERT(!m_InsideString);
5181  BeginValue(false);
5182  m_SB.AddNumber(n);
5183 }
5184 
5185 void VmaJsonWriter::WriteBool(bool b)
5186 {
5187  VMA_ASSERT(!m_InsideString);
5188  BeginValue(false);
5189  m_SB.Add(b ? "true" : "false");
5190 }
5191 
5192 void VmaJsonWriter::WriteNull()
5193 {
5194  VMA_ASSERT(!m_InsideString);
5195  BeginValue(false);
5196  m_SB.Add("null");
5197 }
5198 
5199 void VmaJsonWriter::BeginValue(bool isString)
5200 {
5201  if(!m_Stack.empty())
5202  {
5203  StackItem& currItem = m_Stack.back();
5204  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5205  currItem.valueCount % 2 == 0)
5206  {
5207  VMA_ASSERT(isString);
5208  }
5209 
5210  if(currItem.type == COLLECTION_TYPE_OBJECT &&
5211  currItem.valueCount % 2 != 0)
5212  {
5213  m_SB.Add(": ");
5214  }
5215  else if(currItem.valueCount > 0)
5216  {
5217  m_SB.Add(", ");
5218  WriteIndent();
5219  }
5220  else
5221  {
5222  WriteIndent();
5223  }
5224  ++currItem.valueCount;
5225  }
5226 }
5227 
5228 void VmaJsonWriter::WriteIndent(bool oneLess)
5229 {
5230  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
5231  {
5232  m_SB.AddNewLine();
5233 
5234  size_t count = m_Stack.size();
5235  if(count > 0 && oneLess)
5236  {
5237  --count;
5238  }
5239  for(size_t i = 0; i < count; ++i)
5240  {
5241  m_SB.Add(INDENT);
5242  }
5243  }
5244 }
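
/*
Usage sketch (illustrative; 'hAllocator' is a hypothetical VmaAllocator):
emitting {"Name": "Buf", "Size": 256} with the writer above. Inside an object,
strings at even positions are keys - BeginValue() asserts exactly that.

  VmaStringBuilder sb(hAllocator);
  VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
  json.BeginObject();
  json.WriteString("Name"); json.WriteString("Buf");
  json.WriteString("Size"); json.WriteNumber(256u);
  json.EndObject();
*/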
5245 
5246 #endif // #if VMA_STATS_STRING_ENABLED
5247 
5249 
5250 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
5251 {
5252  if(IsUserDataString())
5253  {
5254  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
5255 
5256  FreeUserDataString(hAllocator);
5257 
5258  if(pUserData != VMA_NULL)
5259  {
5260  const char* const newStrSrc = (char*)pUserData;
5261  const size_t newStrLen = strlen(newStrSrc);
5262  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
5263  memcpy(newStrDst, newStrSrc, newStrLen + 1);
5264  m_pUserData = newStrDst;
5265  }
5266  }
5267  else
5268  {
5269  m_pUserData = pUserData;
5270  }
5271 }
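
/*
Usage note (illustrative): when the allocation was created with user data as
a string, SetUserData() stores a private copy, so the caller's buffer does not
need to outlive the call:

  char name[] = "temporary name";
  hAllocation->SetUserData(hAllocator, name); // copies the string
  // 'name' may now be modified or go out of scope safely.
*/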
5272 
5273 void VmaAllocation_T::ChangeBlockAllocation(
5274  VmaAllocator hAllocator,
5275  VmaDeviceMemoryBlock* block,
5276  VkDeviceSize offset)
5277 {
5278  VMA_ASSERT(block != VMA_NULL);
5279  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5280 
5281  // Move mapping reference counter from old block to new block.
5282  if(block != m_BlockAllocation.m_Block)
5283  {
5284  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
5285  if(IsPersistentMap())
5286  ++mapRefCount;
5287  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
5288  block->Map(hAllocator, mapRefCount, VMA_NULL);
5289  }
5290 
5291  m_BlockAllocation.m_Block = block;
5292  m_BlockAllocation.m_Offset = offset;
5293 }
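
// Note on the move above (illustrative numbers): if the user mapped the
// allocation 3 times and it is also persistently mapped, mapRefCount is
// 3 + 1 = 4; the old block's map count drops by 4 and the new block's rises
// by 4, so the overall mapping state survives the defragmentation move.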
5294 
5295 VkDeviceSize VmaAllocation_T::GetOffset() const
5296 {
5297  switch(m_Type)
5298  {
5299  case ALLOCATION_TYPE_BLOCK:
5300  return m_BlockAllocation.m_Offset;
5301  case ALLOCATION_TYPE_DEDICATED:
5302  return 0;
5303  default:
5304  VMA_ASSERT(0);
5305  return 0;
5306  }
5307 }
5308 
5309 VkDeviceMemory VmaAllocation_T::GetMemory() const
5310 {
5311  switch(m_Type)
5312  {
5313  case ALLOCATION_TYPE_BLOCK:
5314  return m_BlockAllocation.m_Block->GetDeviceMemory();
5315  case ALLOCATION_TYPE_DEDICATED:
5316  return m_DedicatedAllocation.m_hMemory;
5317  default:
5318  VMA_ASSERT(0);
5319  return VK_NULL_HANDLE;
5320  }
5321 }
5322 
5323 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5324 {
5325  switch(m_Type)
5326  {
5327  case ALLOCATION_TYPE_BLOCK:
5328  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5329  case ALLOCATION_TYPE_DEDICATED:
5330  return m_DedicatedAllocation.m_MemoryTypeIndex;
5331  default:
5332  VMA_ASSERT(0);
5333  return UINT32_MAX;
5334  }
5335 }
5336 
5337 void* VmaAllocation_T::GetMappedData() const
5338 {
5339  switch(m_Type)
5340  {
5341  case ALLOCATION_TYPE_BLOCK:
5342  if(m_MapCount != 0)
5343  {
5344  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5345  VMA_ASSERT(pBlockData != VMA_NULL);
5346  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5347  }
5348  else
5349  {
5350  return VMA_NULL;
5351  }
5352  break;
5353  case ALLOCATION_TYPE_DEDICATED:
5354  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5355  return m_DedicatedAllocation.m_pMappedData;
5356  default:
5357  VMA_ASSERT(0);
5358  return VMA_NULL;
5359  }
5360 }
5361 
5362 bool VmaAllocation_T::CanBecomeLost() const
5363 {
5364  switch(m_Type)
5365  {
5366  case ALLOCATION_TYPE_BLOCK:
5367  return m_BlockAllocation.m_CanBecomeLost;
5368  case ALLOCATION_TYPE_DEDICATED:
5369  return false;
5370  default:
5371  VMA_ASSERT(0);
5372  return false;
5373  }
5374 }
5375 
5376 VmaPool VmaAllocation_T::GetPool() const
5377 {
5378  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5379  return m_BlockAllocation.m_hPool;
5380 }
5381 
5382 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5383 {
5384  VMA_ASSERT(CanBecomeLost());
5385 
5386  /*
5387  Warning: This is a carefully designed algorithm.
5388  Do not modify unless you really know what you're doing :)
5389  */
5390  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
5391  for(;;)
5392  {
5393  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
5394  {
5395  VMA_ASSERT(0);
5396  return false;
5397  }
5398  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
5399  {
5400  return false;
5401  }
5402  else // Last use time earlier than current time.
5403  {
5404  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
5405  {
5406  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
5407  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
5408  return true;
5409  }
5410  }
5411  }
5412 }
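
// Why the loop above terminates: compare_exchange_weak reloads
// localLastUseFrameIndex with the freshly observed value whenever the exchange
// fails (including spurious failures), so each iteration re-evaluates the
// lost/in-use decision against current data until one branch returns.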
5413 
5414 #if VMA_STATS_STRING_ENABLED
5415 
5416 // Names corresponding to values of enum VmaSuballocationType.
5417 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
5418  "FREE",
5419  "UNKNOWN",
5420  "BUFFER",
5421  "IMAGE_UNKNOWN",
5422  "IMAGE_LINEAR",
5423  "IMAGE_OPTIMAL",
5424 };
5425 
5426 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
5427 {
5428  json.WriteString("Type");
5429  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
5430 
5431  json.WriteString("Size");
5432  json.WriteNumber(m_Size);
5433 
5434  if(m_pUserData != VMA_NULL)
5435  {
5436  json.WriteString("UserData");
5437  if(IsUserDataString())
5438  {
5439  json.WriteString((const char*)m_pUserData);
5440  }
5441  else
5442  {
5443  json.BeginString();
5444  json.ContinueString_Pointer(m_pUserData);
5445  json.EndString();
5446  }
5447  }
5448 
5449  json.WriteString("CreationFrameIndex");
5450  json.WriteNumber(m_CreationFrameIndex);
5451 
5452  json.WriteString("LastUseFrameIndex");
5453  json.WriteNumber(GetLastUseFrameIndex());
5454 
5455  if(m_BufferImageUsage != 0)
5456  {
5457  json.WriteString("Usage");
5458  json.WriteNumber(m_BufferImageUsage);
5459  }
5460 }
5461 
5462 #endif
5463 
5464 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
5465 {
5466  VMA_ASSERT(IsUserDataString());
5467  if(m_pUserData != VMA_NULL)
5468  {
5469  char* const oldStr = (char*)m_pUserData;
5470  const size_t oldStrLen = strlen(oldStr);
5471  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
5472  m_pUserData = VMA_NULL;
5473  }
5474 }
5475 
5476 void VmaAllocation_T::BlockAllocMap()
5477 {
5478  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5479 
5480  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5481  {
5482  ++m_MapCount;
5483  }
5484  else
5485  {
5486  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
5487  }
5488 }
5489 
5490 void VmaAllocation_T::BlockAllocUnmap()
5491 {
5492  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5493 
5494  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5495  {
5496  --m_MapCount;
5497  }
5498  else
5499  {
5500  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
5501  }
5502 }
5503 
5504 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
5505 {
5506  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5507 
5508  if(m_MapCount != 0)
5509  {
5510  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5511  {
5512  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
5513  *ppData = m_DedicatedAllocation.m_pMappedData;
5514  ++m_MapCount;
5515  return VK_SUCCESS;
5516  }
5517  else
5518  {
5519  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
5520  return VK_ERROR_MEMORY_MAP_FAILED;
5521  }
5522  }
5523  else
5524  {
5525  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5526  hAllocator->m_hDevice,
5527  m_DedicatedAllocation.m_hMemory,
5528  0, // offset
5529  VK_WHOLE_SIZE,
5530  0, // flags
5531  ppData);
5532  if(result == VK_SUCCESS)
5533  {
5534  m_DedicatedAllocation.m_pMappedData = *ppData;
5535  m_MapCount = 1;
5536  }
5537  return result;
5538  }
5539 }
5540 
5541 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
5542 {
5543  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5544 
5545  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5546  {
5547  --m_MapCount;
5548  if(m_MapCount == 0)
5549  {
5550  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
5551  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
5552  hAllocator->m_hDevice,
5553  m_DedicatedAllocation.m_hMemory);
5554  }
5555  }
5556  else
5557  {
5558  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
5559  }
5560 }
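
/*
Usage sketch (illustrative): dedicated allocations are reference-counted, so
nested map/unmap pairs are legal and only the last unmap calls vkUnmapMemory:

  void* p1 = VMA_NULL;
  void* p2 = VMA_NULL;
  hAllocation->DedicatedAllocMap(hAllocator, &p1); // vkMapMemory, m_MapCount = 1
  hAllocation->DedicatedAllocMap(hAllocator, &p2); // p2 == p1, m_MapCount = 2
  hAllocation->DedicatedAllocUnmap(hAllocator);    // m_MapCount = 1, still mapped
  hAllocation->DedicatedAllocUnmap(hAllocator);    // m_MapCount = 0, vkUnmapMemory
*/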
5561 
5562 #if VMA_STATS_STRING_ENABLED
5563 
5564 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
5565 {
5566  json.BeginObject();
5567 
5568  json.WriteString("Blocks");
5569  json.WriteNumber(stat.blockCount);
5570 
5571  json.WriteString("Allocations");
5572  json.WriteNumber(stat.allocationCount);
5573 
5574  json.WriteString("UnusedRanges");
5575  json.WriteNumber(stat.unusedRangeCount);
5576 
5577  json.WriteString("UsedBytes");
5578  json.WriteNumber(stat.usedBytes);
5579 
5580  json.WriteString("UnusedBytes");
5581  json.WriteNumber(stat.unusedBytes);
5582 
5583  if(stat.allocationCount > 1)
5584  {
5585  json.WriteString("AllocationSize");
5586  json.BeginObject(true);
5587  json.WriteString("Min");
5588  json.WriteNumber(stat.allocationSizeMin);
5589  json.WriteString("Avg");
5590  json.WriteNumber(stat.allocationSizeAvg);
5591  json.WriteString("Max");
5592  json.WriteNumber(stat.allocationSizeMax);
5593  json.EndObject();
5594  }
5595 
5596  if(stat.unusedRangeCount > 1)
5597  {
5598  json.WriteString("UnusedRangeSize");
5599  json.BeginObject(true);
5600  json.WriteString("Min");
5601  json.WriteNumber(stat.unusedRangeSizeMin);
5602  json.WriteString("Avg");
5603  json.WriteNumber(stat.unusedRangeSizeAvg);
5604  json.WriteString("Max");
5605  json.WriteNumber(stat.unusedRangeSizeMax);
5606  json.EndObject();
5607  }
5608 
5609  json.EndObject();
5610 }
5611 
5612 #endif // #if VMA_STATS_STRING_ENABLED
5613 
5614 struct VmaSuballocationItemSizeLess
5615 {
5616  bool operator()(
5617  const VmaSuballocationList::iterator lhs,
5618  const VmaSuballocationList::iterator rhs) const
5619  {
5620  return lhs->size < rhs->size;
5621  }
5622  bool operator()(
5623  const VmaSuballocationList::iterator lhs,
5624  VkDeviceSize rhsSize) const
5625  {
5626  return lhs->size < rhsSize;
5627  }
5628 };
5629 
5631 // class VmaBlockMetadata
5632 
5633 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5634  m_Size(0),
5635  m_FreeCount(0),
5636  m_SumFreeSize(0),
5637  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5638  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5639 {
5640 }
5641 
5642 VmaBlockMetadata::~VmaBlockMetadata()
5643 {
5644 }
5645 
5646 void VmaBlockMetadata::Init(VkDeviceSize size)
5647 {
5648  m_Size = size;
5649  m_FreeCount = 1;
5650  m_SumFreeSize = size;
5651 
5652  VmaSuballocation suballoc = {};
5653  suballoc.offset = 0;
5654  suballoc.size = size;
5655  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5656  suballoc.hAllocation = VK_NULL_HANDLE;
5657 
5658  m_Suballocations.push_back(suballoc);
5659  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5660  --suballocItem;
5661  m_FreeSuballocationsBySize.push_back(suballocItem);
5662 }
5663 
5664 bool VmaBlockMetadata::Validate() const
5665 {
5666  if(m_Suballocations.empty())
5667  {
5668  return false;
5669  }
5670 
5671  // Expected offset of the next suballocation, as calculated from the previous ones.
5672  VkDeviceSize calculatedOffset = 0;
5673  // Expected number of free suballocations as calculated from traversing their list.
5674  uint32_t calculatedFreeCount = 0;
5675  // Expected sum size of free suballocations as calculated from traversing their list.
5676  VkDeviceSize calculatedSumFreeSize = 0;
5677  // Expected number of free suballocations that should be registered in
5678  // m_FreeSuballocationsBySize calculated from traversing their list.
5679  size_t freeSuballocationsToRegister = 0;
5680  // True if previous visited suballocation was free.
5681  bool prevFree = false;
5682 
5683  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5684  suballocItem != m_Suballocations.cend();
5685  ++suballocItem)
5686  {
5687  const VmaSuballocation& subAlloc = *suballocItem;
5688 
5689  // Actual offset of this suballocation doesn't match the expected one.
5690  if(subAlloc.offset != calculatedOffset)
5691  {
5692  return false;
5693  }
5694 
5695  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5696  // Two adjacent free suballocations are invalid. They should be merged.
5697  if(prevFree && currFree)
5698  {
5699  return false;
5700  }
5701 
5702  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5703  {
5704  return false;
5705  }
5706 
5707  if(currFree)
5708  {
5709  calculatedSumFreeSize += subAlloc.size;
5710  ++calculatedFreeCount;
5711  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5712  {
5713  ++freeSuballocationsToRegister;
5714  }
5715 
5716  // Margin required between allocations - every free space must be at least that large.
5717  if(subAlloc.size < VMA_DEBUG_MARGIN)
5718  {
5719  return false;
5720  }
5721  }
5722  else
5723  {
5724  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5725  {
5726  return false;
5727  }
5728  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5729  {
5730  return false;
5731  }
5732 
5733  // Margin required between allocations - previous allocation must be free.
5734  if(VMA_DEBUG_MARGIN > 0 && !prevFree)
5735  {
5736  return false;
5737  }
5738  }
5739 
5740  calculatedOffset += subAlloc.size;
5741  prevFree = currFree;
5742  }
5743 
5744  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5745  // match the expected count.
5746  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5747  {
5748  return false;
5749  }
5750 
5751  VkDeviceSize lastSize = 0;
5752  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5753  {
5754  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5755 
5756  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5757  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5758  {
5759  return false;
5760  }
5761  // They must be sorted by size ascending.
5762  if(suballocItem->size < lastSize)
5763  {
5764  return false;
5765  }
5766 
5767  lastSize = suballocItem->size;
5768  }
5769 
5770  // Check if totals match the calculated values.
5771  if(!ValidateFreeSuballocationList() ||
5772  (calculatedOffset != m_Size) ||
5773  (calculatedSumFreeSize != m_SumFreeSize) ||
5774  (calculatedFreeCount != m_FreeCount))
5775  {
5776  return false;
5777  }
5778 
5779  return true;
5780 }
5781 
5782 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5783 {
5784  if(!m_FreeSuballocationsBySize.empty())
5785  {
5786  return m_FreeSuballocationsBySize.back()->size;
5787  }
5788  else
5789  {
5790  return 0;
5791  }
5792 }
5793 
5794 bool VmaBlockMetadata::IsEmpty() const
5795 {
5796  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5797 }
5798 
5799 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5800 {
5801  outInfo.blockCount = 1;
5802 
5803  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5804  outInfo.allocationCount = rangeCount - m_FreeCount;
5805  outInfo.unusedRangeCount = m_FreeCount;
5806 
5807  outInfo.unusedBytes = m_SumFreeSize;
5808  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5809 
5810  outInfo.allocationSizeMin = UINT64_MAX;
5811  outInfo.allocationSizeMax = 0;
5812  outInfo.unusedRangeSizeMin = UINT64_MAX;
5813  outInfo.unusedRangeSizeMax = 0;
5814 
5815  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5816  suballocItem != m_Suballocations.cend();
5817  ++suballocItem)
5818  {
5819  const VmaSuballocation& suballoc = *suballocItem;
5820  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5821  {
5822  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5823  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5824  }
5825  else
5826  {
5827  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5828  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5829  }
5830  }
5831 }
5832 
5833 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5834 {
5835  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5836 
5837  inoutStats.size += m_Size;
5838  inoutStats.unusedSize += m_SumFreeSize;
5839  inoutStats.allocationCount += rangeCount - m_FreeCount;
5840  inoutStats.unusedRangeCount += m_FreeCount;
5841  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5842 }
5843 
5844 #if VMA_STATS_STRING_ENABLED
5845 
5846 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5847 {
5848  json.BeginObject();
5849 
5850  json.WriteString("TotalBytes");
5851  json.WriteNumber(m_Size);
5852 
5853  json.WriteString("UnusedBytes");
5854  json.WriteNumber(m_SumFreeSize);
5855 
5856  json.WriteString("Allocations");
5857  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5858 
5859  json.WriteString("UnusedRanges");
5860  json.WriteNumber(m_FreeCount);
5861 
5862  json.WriteString("Suballocations");
5863  json.BeginArray();
5864  size_t i = 0;
5865  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5866  suballocItem != m_Suballocations.cend();
5867  ++suballocItem, ++i)
5868  {
5869  json.BeginObject(true);
5870 
5871  json.WriteString("Offset");
5872  json.WriteNumber(suballocItem->offset);
5873 
5874  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5875  {
5876  json.WriteString("Type");
5877  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
5878 
5879  json.WriteString("Size");
5880  json.WriteNumber(suballocItem->size);
5881  }
5882  else
5883  {
5884  suballocItem->hAllocation->PrintParameters(json);
5885  }
5886 
5887  json.EndObject();
5888  }
5889  json.EndArray();
5890 
5891  json.EndObject();
5892 }
5893 
5894 #endif // #if VMA_STATS_STRING_ENABLED
5895 
5896 /*
5897 How many suitable free suballocations to analyze before choosing the best one.
5898 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
5899  will be chosen.
5900 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5901  suballocations will be analyzed and the best one will be chosen.
5902 - Any other value is also acceptable.
5903 */
5904 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
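
/*
Illustrative contrast of the two search orders implemented below, on a free
list sorted ascending by size, e.g. {64 KiB, 1 MiB, 4 MiB}, for a 100 KiB
request:
- With VMA_BEST_FIT, the binary search starts at the first range not less than
  allocSize + 2 * VMA_DEBUG_MARGIN, so the 1 MiB range is tried first,
  minimizing leftover waste.
- Otherwise the scan starts from the biggest range (4 MiB) and walks down,
  which tends to preserve mid-sized free ranges.
*/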
5905 
5906 bool VmaBlockMetadata::CreateAllocationRequest(
5907  uint32_t currentFrameIndex,
5908  uint32_t frameInUseCount,
5909  VkDeviceSize bufferImageGranularity,
5910  VkDeviceSize allocSize,
5911  VkDeviceSize allocAlignment,
5912  VmaSuballocationType allocType,
5913  bool canMakeOtherLost,
5914  VmaAllocationRequest* pAllocationRequest)
5915 {
5916  VMA_ASSERT(allocSize > 0);
5917  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5918  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5919  VMA_HEAVY_ASSERT(Validate());
5920 
5921  // There is not enough total free space in this block to fulfill the request: Early return.
5922  if(canMakeOtherLost == false && m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
5923  {
5924  return false;
5925  }
5926 
5927  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
5928  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5929  if(freeSuballocCount > 0)
5930  {
5931  if(VMA_BEST_FIT)
5932  {
5933  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
5934  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5935  m_FreeSuballocationsBySize.data(),
5936  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5937  allocSize + 2 * VMA_DEBUG_MARGIN,
5938  VmaSuballocationItemSizeLess());
5939  size_t index = it - m_FreeSuballocationsBySize.data();
5940  for(; index < freeSuballocCount; ++index)
5941  {
5942  if(CheckAllocation(
5943  currentFrameIndex,
5944  frameInUseCount,
5945  bufferImageGranularity,
5946  allocSize,
5947  allocAlignment,
5948  allocType,
5949  m_FreeSuballocationsBySize[index],
5950  false, // canMakeOtherLost
5951  &pAllocationRequest->offset,
5952  &pAllocationRequest->itemsToMakeLostCount,
5953  &pAllocationRequest->sumFreeSize,
5954  &pAllocationRequest->sumItemSize))
5955  {
5956  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5957  return true;
5958  }
5959  }
5960  }
5961  else
5962  {
5963  // Search starting from the biggest suballocations.
5964  for(size_t index = freeSuballocCount; index--; )
5965  {
5966  if(CheckAllocation(
5967  currentFrameIndex,
5968  frameInUseCount,
5969  bufferImageGranularity,
5970  allocSize,
5971  allocAlignment,
5972  allocType,
5973  m_FreeSuballocationsBySize[index],
5974  false, // canMakeOtherLost
5975  &pAllocationRequest->offset,
5976  &pAllocationRequest->itemsToMakeLostCount,
5977  &pAllocationRequest->sumFreeSize,
5978  &pAllocationRequest->sumItemSize))
5979  {
5980  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5981  return true;
5982  }
5983  }
5984  }
5985  }
5986 
5987  if(canMakeOtherLost)
5988  {
5989  // Brute-force algorithm. TODO: Come up with something better.
5990 
5991  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5992  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5993 
5994  VmaAllocationRequest tmpAllocRequest = {};
5995  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5996  suballocIt != m_Suballocations.end();
5997  ++suballocIt)
5998  {
5999  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
6000  suballocIt->hAllocation->CanBecomeLost())
6001  {
6002  if(CheckAllocation(
6003  currentFrameIndex,
6004  frameInUseCount,
6005  bufferImageGranularity,
6006  allocSize,
6007  allocAlignment,
6008  allocType,
6009  suballocIt,
6010  canMakeOtherLost,
6011  &tmpAllocRequest.offset,
6012  &tmpAllocRequest.itemsToMakeLostCount,
6013  &tmpAllocRequest.sumFreeSize,
6014  &tmpAllocRequest.sumItemSize))
6015  {
6016  tmpAllocRequest.item = suballocIt;
6017 
6018  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
6019  {
6020  *pAllocationRequest = tmpAllocRequest;
6021  }
6022  }
6023  }
6024  }
6025 
6026  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
6027  {
6028  return true;
6029  }
6030  }
6031 
6032  return false;
6033 }
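// Editor's note: among the candidate positions found above, the brute-force
// path keeps the request with the lowest CalcCost(). Assuming CalcCost() sums
// the bytes of other allocations that would be made lost plus a fixed per-item
// penalty, a position over purely free space costs 0 and always wins.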
6034 
6035 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
6036  uint32_t currentFrameIndex,
6037  uint32_t frameInUseCount,
6038  VmaAllocationRequest* pAllocationRequest)
6039 {
6040  while(pAllocationRequest->itemsToMakeLostCount > 0)
6041  {
6042  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
6043  {
6044  ++pAllocationRequest->item;
6045  }
6046  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
6047  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
6048  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
6049  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
6050  {
6051  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
6052  --pAllocationRequest->itemsToMakeLostCount;
6053  }
6054  else
6055  {
6056  return false;
6057  }
6058  }
6059 
6060  VMA_HEAVY_ASSERT(Validate());
6061  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
6062  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
6063 
6064  return true;
6065 }
6066 
6067 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6068 {
6069  uint32_t lostAllocationCount = 0;
6070  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6071  it != m_Suballocations.end();
6072  ++it)
6073  {
6074  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
6075  it->hAllocation->CanBecomeLost() &&
6076  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
6077  {
6078  it = FreeSuballocation(it);
6079  ++lostAllocationCount;
6080  }
6081  }
6082  return lostAllocationCount;
6083 }
6084 
6085 VkResult VmaBlockMetadata::CheckCorruption(const void* pBlockData)
6086 {
6087  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6088  it != m_Suballocations.end();
6089  ++it)
6090  {
6091  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6092  {
6093  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
6094  {
6095  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
6096  return VK_ERROR_VALIDATION_FAILED_EXT;
6097  }
6098  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
6099  {
6100  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
6101  return VK_ERROR_VALIDATION_FAILED_EXT;
6102  }
6103  }
6104  }
6105 
6106  return VK_SUCCESS;
6107 }
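// Editor's note: a sketch of the layout this check assumes, using
// VMA_DEBUG_MARGIN = 16 as an example. Each allocation at offset O of size S
// is surrounded by margins filled with the magic value:
//
//   ... | magic [O-16, O) | allocation data [O, O+S) | magic [O+S, O+S+16) | ...
//
// CheckCorruption() re-reads both margin regions; WriteMagicValueAroundAllocation()
// (defined further below) is what stamps them when the allocation is created.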
6108 
6109 void VmaBlockMetadata::Alloc(
6110  const VmaAllocationRequest& request,
6111  VmaSuballocationType type,
6112  VkDeviceSize allocSize,
6113  VmaAllocation hAllocation)
6114 {
6115  VMA_ASSERT(request.item != m_Suballocations.end());
6116  VmaSuballocation& suballoc = *request.item;
6117  // Given suballocation is a free block.
6118  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
6119  // Given offset is inside this suballocation.
6120  VMA_ASSERT(request.offset >= suballoc.offset);
6121  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
6122  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
6123  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
6124 
6125  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
6126  // it to become used.
6127  UnregisterFreeSuballocation(request.item);
6128 
6129  suballoc.offset = request.offset;
6130  suballoc.size = allocSize;
6131  suballoc.type = type;
6132  suballoc.hAllocation = hAllocation;
6133 
6134  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
6135  if(paddingEnd)
6136  {
6137  VmaSuballocation paddingSuballoc = {};
6138  paddingSuballoc.offset = request.offset + allocSize;
6139  paddingSuballoc.size = paddingEnd;
6140  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6141  VmaSuballocationList::iterator next = request.item;
6142  ++next;
6143  const VmaSuballocationList::iterator paddingEndItem =
6144  m_Suballocations.insert(next, paddingSuballoc);
6145  RegisterFreeSuballocation(paddingEndItem);
6146  }
6147 
6148  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
6149  if(paddingBegin)
6150  {
6151  VmaSuballocation paddingSuballoc = {};
6152  paddingSuballoc.offset = request.offset - paddingBegin;
6153  paddingSuballoc.size = paddingBegin;
6154  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6155  const VmaSuballocationList::iterator paddingBeginItem =
6156  m_Suballocations.insert(request.item, paddingSuballoc);
6157  RegisterFreeSuballocation(paddingBeginItem);
6158  }
6159 
6160  // Update totals.
6161  --m_FreeCount;
6162  if(paddingBegin > 0)
6163  {
6164  ++m_FreeCount;
6165  }
6166  if(paddingEnd > 0)
6167  {
6168  ++m_FreeCount;
6169  }
6170  m_SumFreeSize -= allocSize;
6171 }
6172 
6173 void VmaBlockMetadata::Free(const VmaAllocation allocation)
6174 {
6175  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6176  suballocItem != m_Suballocations.end();
6177  ++suballocItem)
6178  {
6179  VmaSuballocation& suballoc = *suballocItem;
6180  if(suballoc.hAllocation == allocation)
6181  {
6182  FreeSuballocation(suballocItem);
6183  VMA_HEAVY_ASSERT(Validate());
6184  return;
6185  }
6186  }
6187  VMA_ASSERT(0 && "Not found!");
6188 }
6189 
6190 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
6191 {
6192  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6193  suballocItem != m_Suballocations.end();
6194  ++suballocItem)
6195  {
6196  VmaSuballocation& suballoc = *suballocItem;
6197  if(suballoc.offset == offset)
6198  {
6199  FreeSuballocation(suballocItem);
6200  return;
6201  }
6202  }
6203  VMA_ASSERT(0 && "Not found!");
6204 }
6205 
6206 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
6207 {
6208  VkDeviceSize lastSize = 0;
6209  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
6210  {
6211  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
6212 
6213  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6214  {
6215  VMA_ASSERT(0);
6216  return false;
6217  }
6218  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6219  {
6220  VMA_ASSERT(0);
6221  return false;
6222  }
6223  if(it->size < lastSize)
6224  {
6225  VMA_ASSERT(0);
6226  return false;
6227  }
6228 
6229  lastSize = it->size;
6230  }
6231  return true;
6232 }
6233 
6234 bool VmaBlockMetadata::CheckAllocation(
6235  uint32_t currentFrameIndex,
6236  uint32_t frameInUseCount,
6237  VkDeviceSize bufferImageGranularity,
6238  VkDeviceSize allocSize,
6239  VkDeviceSize allocAlignment,
6240  VmaSuballocationType allocType,
6241  VmaSuballocationList::const_iterator suballocItem,
6242  bool canMakeOtherLost,
6243  VkDeviceSize* pOffset,
6244  size_t* itemsToMakeLostCount,
6245  VkDeviceSize* pSumFreeSize,
6246  VkDeviceSize* pSumItemSize) const
6247 {
6248  VMA_ASSERT(allocSize > 0);
6249  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6250  VMA_ASSERT(suballocItem != m_Suballocations.cend());
6251  VMA_ASSERT(pOffset != VMA_NULL);
6252 
6253  *itemsToMakeLostCount = 0;
6254  *pSumFreeSize = 0;
6255  *pSumItemSize = 0;
6256 
6257  if(canMakeOtherLost)
6258  {
6259  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6260  {
6261  *pSumFreeSize = suballocItem->size;
6262  }
6263  else
6264  {
6265  if(suballocItem->hAllocation->CanBecomeLost() &&
6266  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6267  {
6268  ++*itemsToMakeLostCount;
6269  *pSumItemSize = suballocItem->size;
6270  }
6271  else
6272  {
6273  return false;
6274  }
6275  }
6276 
6277  // Remaining size is too small for this request: Early return.
6278  if(m_Size - suballocItem->offset < allocSize)
6279  {
6280  return false;
6281  }
6282 
6283  // Start from offset equal to beginning of this suballocation.
6284  *pOffset = suballocItem->offset;
6285 
6286  // Apply VMA_DEBUG_MARGIN at the beginning.
6287  if(VMA_DEBUG_MARGIN > 0)
6288  {
6289  *pOffset += VMA_DEBUG_MARGIN;
6290  }
6291 
6292  // Apply alignment.
6293  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
6294 
6295  // Check previous suballocations for BufferImageGranularity conflicts.
6296  // Make bigger alignment if necessary.
6297  if(bufferImageGranularity > 1)
6298  {
6299  bool bufferImageGranularityConflict = false;
6300  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6301  while(prevSuballocItem != m_Suballocations.cbegin())
6302  {
6303  --prevSuballocItem;
6304  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6305  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6306  {
6307  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6308  {
6309  bufferImageGranularityConflict = true;
6310  break;
6311  }
6312  }
6313  else
6314  // Already on previous page.
6315  break;
6316  }
6317  if(bufferImageGranularityConflict)
6318  {
6319  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6320  }
6321  }
6322 
6323  // Now that we have final *pOffset, check if we are past suballocItem.
6324  // If yes, return false - this function should be called for another suballocItem as starting point.
6325  if(*pOffset >= suballocItem->offset + suballocItem->size)
6326  {
6327  return false;
6328  }
6329 
6330  // Calculate padding at the beginning based on current offset.
6331  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
6332 
6333  // Calculate required margin at the end.
6334  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
6335 
6336  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
6337  // Another early return check.
6338  if(suballocItem->offset + totalSize > m_Size)
6339  {
6340  return false;
6341  }
6342 
6343  // Advance lastSuballocItem until desired size is reached.
6344  // Update itemsToMakeLostCount.
6345  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
6346  if(totalSize > suballocItem->size)
6347  {
6348  VkDeviceSize remainingSize = totalSize - suballocItem->size;
6349  while(remainingSize > 0)
6350  {
6351  ++lastSuballocItem;
6352  if(lastSuballocItem == m_Suballocations.cend())
6353  {
6354  return false;
6355  }
6356  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6357  {
6358  *pSumFreeSize += lastSuballocItem->size;
6359  }
6360  else
6361  {
6362  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
6363  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
6364  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6365  {
6366  ++*itemsToMakeLostCount;
6367  *pSumItemSize += lastSuballocItem->size;
6368  }
6369  else
6370  {
6371  return false;
6372  }
6373  }
6374  remainingSize = (lastSuballocItem->size < remainingSize) ?
6375  remainingSize - lastSuballocItem->size : 0;
6376  }
6377  }
6378 
6379  // Check next suballocations for BufferImageGranularity conflicts.
6380  // If conflict exists, we must mark more allocations lost or fail.
6381  if(bufferImageGranularity > 1)
6382  {
6383  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
6384  ++nextSuballocItem;
6385  while(nextSuballocItem != m_Suballocations.cend())
6386  {
6387  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6388  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6389  {
6390  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6391  {
6392  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
6393  if(nextSuballoc.hAllocation->CanBecomeLost() &&
6394  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6395  {
6396  ++*itemsToMakeLostCount;
6397  }
6398  else
6399  {
6400  return false;
6401  }
6402  }
6403  }
6404  else
6405  {
6406  // Already on next page.
6407  break;
6408  }
6409  ++nextSuballocItem;
6410  }
6411  }
6412  }
6413  else
6414  {
6415  const VmaSuballocation& suballoc = *suballocItem;
6416  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
6417 
6418  *pSumFreeSize = suballoc.size;
6419 
6420  // Size of this suballocation is too small for this request: Early return.
6421  if(suballoc.size < allocSize)
6422  {
6423  return false;
6424  }
6425 
6426  // Start from offset equal to beginning of this suballocation.
6427  *pOffset = suballoc.offset;
6428 
6429  // Apply VMA_DEBUG_MARGIN at the beginning.
6430  if(VMA_DEBUG_MARGIN > 0)
6431  {
6432  *pOffset += VMA_DEBUG_MARGIN;
6433  }
6434 
6435  // Apply alignment.
6436  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
6437 
6438  // Check previous suballocations for BufferImageGranularity conflicts.
6439  // Make bigger alignment if necessary.
6440  if(bufferImageGranularity > 1)
6441  {
6442  bool bufferImageGranularityConflict = false;
6443  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6444  while(prevSuballocItem != m_Suballocations.cbegin())
6445  {
6446  --prevSuballocItem;
6447  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6448  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6449  {
6450  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6451  {
6452  bufferImageGranularityConflict = true;
6453  break;
6454  }
6455  }
6456  else
6457  // Already on previous page.
6458  break;
6459  }
6460  if(bufferImageGranularityConflict)
6461  {
6462  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6463  }
6464  }
6465 
6466  // Calculate padding at the beginning based on current offset.
6467  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
6468 
6469  // Calculate required margin at the end.
6470  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
6471 
6472  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
6473  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
6474  {
6475  return false;
6476  }
6477 
6478  // Check next suballocations for BufferImageGranularity conflicts.
6479  // If conflict exists, allocation cannot be made here.
6480  if(bufferImageGranularity > 1)
6481  {
6482  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
6483  ++nextSuballocItem;
6484  while(nextSuballocItem != m_Suballocations.cend())
6485  {
6486  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6487  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6488  {
6489  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6490  {
6491  return false;
6492  }
6493  }
6494  else
6495  {
6496  // Already on next page.
6497  break;
6498  }
6499  ++nextSuballocItem;
6500  }
6501  }
6502  }
6503 
6504  // All tests passed: Success. pOffset is already filled.
6505  return true;
6506 }
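// Editor's note: a worked example of the bufferImageGranularity checks above,
// assuming the granularity is a power of two (Vulkan guarantees this). The
// hypothetical OnSamePage() below mirrors the VmaBlocksOnSamePage() helper used
// above: two resources conflict only when the last byte of the first and the
// first byte of the second fall on the same granularity-sized "page".
#if 0
static bool OnSamePage(uint64_t aOffset, uint64_t aSize, uint64_t bOffset, uint64_t pageSize)
{
    const uint64_t aEndPage   = (aOffset + aSize - 1) & ~(pageSize - 1);
    const uint64_t bStartPage = bOffset & ~(pageSize - 1);
    return aEndPage == bStartPage;
}
// With pageSize = 4096: a buffer occupying [4000, 5000) ends on page 4096 and
// an image at offset 6000 starts on page 4096 - a conflict; aligning the image
// offset up to 8192 resolves it.
#endif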
6507 
6508 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
6509 {
6510  VMA_ASSERT(item != m_Suballocations.end());
6511  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6512 
6513  VmaSuballocationList::iterator nextItem = item;
6514  ++nextItem;
6515  VMA_ASSERT(nextItem != m_Suballocations.end());
6516  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6517 
6518  item->size += nextItem->size;
6519  --m_FreeCount;
6520  m_Suballocations.erase(nextItem);
6521 }
6522 
6523 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
6524 {
6525  // Change this suballocation to be marked as free.
6526  VmaSuballocation& suballoc = *suballocItem;
6527  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6528  suballoc.hAllocation = VK_NULL_HANDLE;
6529 
6530  // Update totals.
6531  ++m_FreeCount;
6532  m_SumFreeSize += suballoc.size;
6533 
6534  // Merge with previous and/or next suballocation if it's also free.
6535  bool mergeWithNext = false;
6536  bool mergeWithPrev = false;
6537 
6538  VmaSuballocationList::iterator nextItem = suballocItem;
6539  ++nextItem;
6540  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
6541  {
6542  mergeWithNext = true;
6543  }
6544 
6545  VmaSuballocationList::iterator prevItem = suballocItem;
6546  if(suballocItem != m_Suballocations.begin())
6547  {
6548  --prevItem;
6549  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6550  {
6551  mergeWithPrev = true;
6552  }
6553  }
6554 
6555  if(mergeWithNext)
6556  {
6557  UnregisterFreeSuballocation(nextItem);
6558  MergeFreeWithNext(suballocItem);
6559  }
6560 
6561  if(mergeWithPrev)
6562  {
6563  UnregisterFreeSuballocation(prevItem);
6564  MergeFreeWithNext(prevItem);
6565  RegisterFreeSuballocation(prevItem);
6566  return prevItem;
6567  }
6568  else
6569  {
6570  RegisterFreeSuballocation(suballocItem);
6571  return suballocItem;
6572  }
6573 }
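// Editor's note: a worked example of the merge above. Given the list
// [FREE 64 | USED 128 | FREE 32], freeing the middle item first turns it into
// FREE 128, then MergeFreeWithNext() absorbs the trailing FREE 32 (-> 160),
// and finally the leading FREE 64 absorbs that (-> 224), leaving one
// registered free range of 224 bytes.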
6574 
6575 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6576 {
6577  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6578  VMA_ASSERT(item->size > 0);
6579 
6580  // You may want to enable this validation at the beginning or at the end of
6581  // this function, depending on what you want to check.
6582  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6583 
6584  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6585  {
6586  if(m_FreeSuballocationsBySize.empty())
6587  {
6588  m_FreeSuballocationsBySize.push_back(item);
6589  }
6590  else
6591  {
6592  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6593  }
6594  }
6595 
6596  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6597 }
6598 
6599 
6600 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6601 {
6602  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6603  VMA_ASSERT(item->size > 0);
6604 
6605  // You may want to enable this validation at the beginning or at the end of
6606  // this function, depending on what you want to check.
6607  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6608 
6609  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6610  {
6611  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6612  m_FreeSuballocationsBySize.data(),
6613  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6614  item,
6615  VmaSuballocationItemSizeLess());
6616  for(size_t index = it - m_FreeSuballocationsBySize.data();
6617  index < m_FreeSuballocationsBySize.size();
6618  ++index)
6619  {
6620  if(m_FreeSuballocationsBySize[index] == item)
6621  {
6622  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6623  return;
6624  }
6625  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6626  }
6627  VMA_ASSERT(0 && "Not found.");
6628  }
6629 
6630  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6631 }
6632 
6634 // class VmaDeviceMemoryBlock
6635 
6636 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6637  m_Metadata(hAllocator),
6638  m_MemoryTypeIndex(UINT32_MAX),
6639  m_Id(0),
6640  m_hMemory(VK_NULL_HANDLE),
6641  m_MapCount(0),
6642  m_pMappedData(VMA_NULL)
6643 {
6644 }
6645 
6646 void VmaDeviceMemoryBlock::Init(
6647  uint32_t newMemoryTypeIndex,
6648  VkDeviceMemory newMemory,
6649  VkDeviceSize newSize,
6650  uint32_t id)
6651 {
6652  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6653 
6654  m_MemoryTypeIndex = newMemoryTypeIndex;
6655  m_Id = id;
6656  m_hMemory = newMemory;
6657 
6658  m_Metadata.Init(newSize);
6659 }
6660 
6661 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6662 {
6663  // This is the most important assert in the entire library.
6664  // Hitting it means you have a memory leak - unreleased VmaAllocation objects.
6665  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6666 
6667  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6668  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6669  m_hMemory = VK_NULL_HANDLE;
6670 }
6671 
6672 bool VmaDeviceMemoryBlock::Validate() const
6673 {
6674  if((m_hMemory == VK_NULL_HANDLE) ||
6675  (m_Metadata.GetSize() == 0))
6676  {
6677  return false;
6678  }
6679 
6680  return m_Metadata.Validate();
6681 }
6682 
6683 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
6684 {
6685  void* pData = VMA_NULL;
6686  VkResult res = Map(hAllocator, 1, &pData);
6687  if(res != VK_SUCCESS)
6688  {
6689  return res;
6690  }
6691 
6692  res = m_Metadata.CheckCorruption(pData);
6693 
6694  Unmap(hAllocator, 1);
6695 
6696  return res;
6697 }
6698 
6699 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6700 {
6701  if(count == 0)
6702  {
6703  return VK_SUCCESS;
6704  }
6705 
6706  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6707  if(m_MapCount != 0)
6708  {
6709  m_MapCount += count;
6710  VMA_ASSERT(m_pMappedData != VMA_NULL);
6711  if(ppData != VMA_NULL)
6712  {
6713  *ppData = m_pMappedData;
6714  }
6715  return VK_SUCCESS;
6716  }
6717  else
6718  {
6719  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6720  hAllocator->m_hDevice,
6721  m_hMemory,
6722  0, // offset
6723  VK_WHOLE_SIZE,
6724  0, // flags
6725  &m_pMappedData);
6726  if(result == VK_SUCCESS)
6727  {
6728  if(ppData != VMA_NULL)
6729  {
6730  *ppData = m_pMappedData;
6731  }
6732  m_MapCount = count;
6733  }
6734  return result;
6735  }
6736 }
6737 
6738 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6739 {
6740  if(count == 0)
6741  {
6742  return;
6743  }
6744 
6745  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6746  if(m_MapCount >= count)
6747  {
6748  m_MapCount -= count;
6749  if(m_MapCount == 0)
6750  {
6751  m_pMappedData = VMA_NULL;
6752  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
6753  }
6754  }
6755  else
6756  {
6757  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6758  }
6759 }
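// Editor's note: Map()/Unmap() above are reference-counted per block. For
// example, two Map(hAllocator, 1, ...) calls on the same block issue a single
// vkMapMemory and leave m_MapCount == 2; the matching Unmap(hAllocator, 1)
// calls defer vkUnmapMemory until the count drops back to zero. The lock on
// m_Mutex makes this safe across threads.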
6760 
6761 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
6762 {
6763  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
6764  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
6765 
6766  void* pData;
6767  VkResult res = Map(hAllocator, 1, &pData);
6768  if(res != VK_SUCCESS)
6769  {
6770  return res;
6771  }
6772 
6773  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
6774  VmaWriteMagicValue(pData, allocOffset + allocSize);
6775 
6776  Unmap(hAllocator, 1);
6777 
6778  return VK_SUCCESS;
6779 }
6780 
6781 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
6782 {
6783  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
6784  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
6785 
6786  void* pData;
6787  VkResult res = Map(hAllocator, 1, &pData);
6788  if(res != VK_SUCCESS)
6789  {
6790  return res;
6791  }
6792 
6793  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
6794  {
6795  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
6796  }
6797  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
6798  {
6799  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
6800  }
6801 
6802  Unmap(hAllocator, 1);
6803 
6804  return VK_SUCCESS;
6805 }
6806 
6807 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
6808  const VmaAllocator hAllocator,
6809  const VmaAllocation hAllocation,
6810  VkBuffer hBuffer)
6811 {
6812  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6813  hAllocation->GetBlock() == this);
6814  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6815  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6816  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
6817  hAllocator->m_hDevice,
6818  hBuffer,
6819  m_hMemory,
6820  hAllocation->GetOffset());
6821 }
6822 
6823 VkResult VmaDeviceMemoryBlock::BindImageMemory(
6824  const VmaAllocator hAllocator,
6825  const VmaAllocation hAllocation,
6826  VkImage hImage)
6827 {
6828  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6829  hAllocation->GetBlock() == this);
6830  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6831  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6832  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
6833  hAllocator->m_hDevice,
6834  hImage,
6835  m_hMemory,
6836  hAllocation->GetOffset());
6837 }
6838 
6839 static void InitStatInfo(VmaStatInfo& outInfo)
6840 {
6841  memset(&outInfo, 0, sizeof(outInfo));
6842  outInfo.allocationSizeMin = UINT64_MAX;
6843  outInfo.unusedRangeSizeMin = UINT64_MAX;
6844 }
6845 
6846 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6847 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6848 {
6849  inoutInfo.blockCount += srcInfo.blockCount;
6850  inoutInfo.allocationCount += srcInfo.allocationCount;
6851  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6852  inoutInfo.usedBytes += srcInfo.usedBytes;
6853  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6854  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6855  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6856  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6857  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6858 }
6859 
6860 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6861 {
6862  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6863  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6864  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6865  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6866 }
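// Editor's note: VmaRoundDiv rounds to nearest rather than truncating. For
// example, usedBytes = 300 over allocationCount = 7 yields allocationSizeAvg =
// (300 + 7/2) / 7 = 43, where plain integer division would give 42.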
6867 
6868 VmaPool_T::VmaPool_T(
6869  VmaAllocator hAllocator,
6870  const VmaPoolCreateInfo& createInfo) :
6871  m_BlockVector(
6872  hAllocator,
6873  createInfo.memoryTypeIndex,
6874  createInfo.blockSize,
6875  createInfo.minBlockCount,
6876  createInfo.maxBlockCount,
6877  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6878  createInfo.frameInUseCount,
6879  true), // isCustomPool
6880  m_Id(0)
6881 {
6882 }
6883 
6884 VmaPool_T::~VmaPool_T()
6885 {
6886 }
6887 
6888 #if VMA_STATS_STRING_ENABLED
6889 
6890 #endif // #if VMA_STATS_STRING_ENABLED
6891 
6892 VmaBlockVector::VmaBlockVector(
6893  VmaAllocator hAllocator,
6894  uint32_t memoryTypeIndex,
6895  VkDeviceSize preferredBlockSize,
6896  size_t minBlockCount,
6897  size_t maxBlockCount,
6898  VkDeviceSize bufferImageGranularity,
6899  uint32_t frameInUseCount,
6900  bool isCustomPool) :
6901  m_hAllocator(hAllocator),
6902  m_MemoryTypeIndex(memoryTypeIndex),
6903  m_PreferredBlockSize(preferredBlockSize),
6904  m_MinBlockCount(minBlockCount),
6905  m_MaxBlockCount(maxBlockCount),
6906  m_BufferImageGranularity(bufferImageGranularity),
6907  m_FrameInUseCount(frameInUseCount),
6908  m_IsCustomPool(isCustomPool),
6909  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6910  m_HasEmptyBlock(false),
6911  m_pDefragmentator(VMA_NULL),
6912  m_NextBlockId(0)
6913 {
6914 }
6915 
6916 VmaBlockVector::~VmaBlockVector()
6917 {
6918  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6919 
6920  for(size_t i = m_Blocks.size(); i--; )
6921  {
6922  m_Blocks[i]->Destroy(m_hAllocator);
6923  vma_delete(m_hAllocator, m_Blocks[i]);
6924  }
6925 }
6926 
6927 VkResult VmaBlockVector::CreateMinBlocks()
6928 {
6929  for(size_t i = 0; i < m_MinBlockCount; ++i)
6930  {
6931  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6932  if(res != VK_SUCCESS)
6933  {
6934  return res;
6935  }
6936  }
6937  return VK_SUCCESS;
6938 }
6939 
6940 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6941 {
6942  pStats->size = 0;
6943  pStats->unusedSize = 0;
6944  pStats->allocationCount = 0;
6945  pStats->unusedRangeCount = 0;
6946  pStats->unusedRangeSizeMax = 0;
6947 
6948  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6949 
6950  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6951  {
6952  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6953  VMA_ASSERT(pBlock);
6954  VMA_HEAVY_ASSERT(pBlock->Validate());
6955  pBlock->m_Metadata.AddPoolStats(*pStats);
6956  }
6957 }
6958 
6959 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
6960 {
6961  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
6962  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
6963  (VMA_DEBUG_MARGIN > 0) &&
6964  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
6965 }
6966 
6967 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6968 
6969 VkResult VmaBlockVector::Allocate(
6970  VmaPool hCurrentPool,
6971  uint32_t currentFrameIndex,
6972  VkDeviceSize size,
6973  VkDeviceSize alignment,
6974  const VmaAllocationCreateInfo& createInfo,
6975  VmaSuballocationType suballocType,
6976  VmaAllocation* pAllocation)
6977 {
6978  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
6979  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
6980  {
6981  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6982  }
6983 
6984  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6985  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6986 
6987  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6988 
6989  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6990  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6991  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6992  {
6993  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6994  VMA_ASSERT(pCurrBlock);
6995  VmaAllocationRequest currRequest = {};
6996  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6997  currentFrameIndex,
6998  m_FrameInUseCount,
6999  m_BufferImageGranularity,
7000  size,
7001  alignment,
7002  suballocType,
7003  false, // canMakeOtherLost
7004  &currRequest))
7005  {
7006  // Allocate from pCurrBlock.
7007  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
7008 
7009  if(mapped)
7010  {
7011  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
7012  if(res != VK_SUCCESS)
7013  {
7014  return res;
7015  }
7016  }
7017 
7018  // We no longer have an empty block.
7019  if(pCurrBlock->m_Metadata.IsEmpty())
7020  {
7021  m_HasEmptyBlock = false;
7022  }
7023 
7024  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
7025  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, size, *pAllocation);
7026  (*pAllocation)->InitBlockAllocation(
7027  hCurrentPool,
7028  pCurrBlock,
7029  currRequest.offset,
7030  alignment,
7031  size,
7032  suballocType,
7033  mapped,
7034  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
7035  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
7036  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
7037  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
7038  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
7039  {
7040  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
7041  }
7042  if(IsCorruptionDetectionEnabled())
7043  {
7044  VkResult res = pCurrBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
7045  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
7046  }
7047  return VK_SUCCESS;
7048  }
7049  }
7050 
7051  const bool canCreateNewBlock =
7052  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
7053  (m_Blocks.size() < m_MaxBlockCount);
7054 
7055  // 2. Try to create new block.
7056  if(canCreateNewBlock)
7057  {
7058  // Calculate optimal size for new block.
7059  VkDeviceSize newBlockSize = m_PreferredBlockSize;
7060  uint32_t newBlockSizeShift = 0;
7061  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
7062 
7063  // Allocating blocks of other sizes is allowed only in default pools.
7064  // In custom pools block size is fixed.
7065  if(m_IsCustomPool == false)
7066  {
7067  // Allocate 1/8, 1/4, 1/2 as first blocks.
7068  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
7069  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
7070  {
7071  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
7072  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
7073  {
7074  newBlockSize = smallerNewBlockSize;
7075  ++newBlockSizeShift;
7076  }
7077  else
7078  {
7079  break;
7080  }
7081  }
7082  }
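// Editor's note: a worked example of the sizing heuristic above. With
// m_PreferredBlockSize = 256 MiB, no existing blocks, and a 1 MiB request, the
// loop halves three times (128, 64, 32), so the first block created is 32 MiB.
// With that block in place, the next new block stops halving at 64 MiB
// (32 MiB is not larger than the existing 32 MiB block), and so on back up
// toward 256 MiB.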
7083 
7084  size_t newBlockIndex = 0;
7085  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
7086  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
7087  if(m_IsCustomPool == false)
7088  {
7089  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
7090  {
7091  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
7092  if(smallerNewBlockSize >= size)
7093  {
7094  newBlockSize = smallerNewBlockSize;
7095  ++newBlockSizeShift;
7096  res = CreateBlock(newBlockSize, &newBlockIndex);
7097  }
7098  else
7099  {
7100  break;
7101  }
7102  }
7103  }
7104 
7105  if(res == VK_SUCCESS)
7106  {
7107  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
7108  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= size);
7109 
7110  if(mapped)
7111  {
7112  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
7113  if(res != VK_SUCCESS)
7114  {
7115  return res;
7116  }
7117  }
7118 
7119  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
7120  VmaAllocationRequest allocRequest;
7121  if(pBlock->m_Metadata.CreateAllocationRequest(
7122  currentFrameIndex,
7123  m_FrameInUseCount,
7124  m_BufferImageGranularity,
7125  size,
7126  alignment,
7127  suballocType,
7128  false, // canMakeOtherLost
7129  &allocRequest))
7130  {
7131  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
7132  pBlock->m_Metadata.Alloc(allocRequest, suballocType, size, *pAllocation);
7133  (*pAllocation)->InitBlockAllocation(
7134  hCurrentPool,
7135  pBlock,
7136  allocRequest.offset,
7137  alignment,
7138  size,
7139  suballocType,
7140  mapped,
7141  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
7142  VMA_HEAVY_ASSERT(pBlock->Validate());
7143  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
7144  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
7145  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
7146  {
7147  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
7148  }
7149  if(IsCorruptionDetectionEnabled())
7150  {
7151  res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, allocRequest.offset, size);
7152  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
7153  }
7154  return VK_SUCCESS;
7155  }
7156  else
7157  {
7158  // Allocation from empty block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
7159  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7160  }
7161  }
7162  }
7163 
7164  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
7165 
7166  // 3. Try to allocate from existing blocks with making other allocations lost.
7167  if(canMakeOtherLost)
7168  {
7169  uint32_t tryIndex = 0;
7170  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
7171  {
7172  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
7173  VmaAllocationRequest bestRequest = {};
7174  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
7175 
7176  // 1. Search existing allocations.
7177  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
7178  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
7179  {
7180  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
7181  VMA_ASSERT(pCurrBlock);
7182  VmaAllocationRequest currRequest = {};
7183  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
7184  currentFrameIndex,
7185  m_FrameInUseCount,
7186  m_BufferImageGranularity,
7187  size,
7188  alignment,
7189  suballocType,
7190  canMakeOtherLost,
7191  &currRequest))
7192  {
7193  const VkDeviceSize currRequestCost = currRequest.CalcCost();
7194  if(pBestRequestBlock == VMA_NULL ||
7195  currRequestCost < bestRequestCost)
7196  {
7197  pBestRequestBlock = pCurrBlock;
7198  bestRequest = currRequest;
7199  bestRequestCost = currRequestCost;
7200 
7201  if(bestRequestCost == 0)
7202  {
7203  break;
7204  }
7205  }
7206  }
7207  }
7208 
7209  if(pBestRequestBlock != VMA_NULL)
7210  {
7211  if(mapped)
7212  {
7213  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
7214  if(res != VK_SUCCESS)
7215  {
7216  return res;
7217  }
7218  }
7219 
7220  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
7221  currentFrameIndex,
7222  m_FrameInUseCount,
7223  &bestRequest))
7224  {
7225  // We no longer have an empty block.
7226  if(pBestRequestBlock->m_Metadata.IsEmpty())
7227  {
7228  m_HasEmptyBlock = false;
7229  }
7230  // Allocate from this pBlock.
7231  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
7232  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, size, *pAllocation);
7233  (*pAllocation)->InitBlockAllocation(
7234  hCurrentPool,
7235  pBestRequestBlock,
7236  bestRequest.offset,
7237  alignment,
7238  size,
7239  suballocType,
7240  mapped,
7241  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
7242  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
7243  VMA_DEBUG_LOG(" Returned from existing block");
7244  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
7245  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
7246  {
7247  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
7248  }
7249  if(IsCorruptionDetectionEnabled())
7250  {
7251  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
7252  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
7253  }
7254  return VK_SUCCESS;
7255  }
7256  // else: Some allocations must have been touched while we are here. Next try.
7257  }
7258  else
7259  {
7260  // Could not find place in any of the blocks - break outer loop.
7261  break;
7262  }
7263  }
7264  /* Maximum number of tries exceeded - a very unlikely event when many other
7265  threads are simultaneously touching allocations, making it impossible to mark
7266  them as lost while we try to allocate. */
7267  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
7268  {
7269  return VK_ERROR_TOO_MANY_OBJECTS;
7270  }
7271  }
7272 
7273  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7274 }
7275 
7276 void VmaBlockVector::Free(
7277  VmaAllocation hAllocation)
7278 {
7279  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
7280 
7281  // Scope for lock.
7282  {
7283  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7284 
7285  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
7286 
7287  if(IsCorruptionDetectionEnabled())
7288  {
7289  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
7290  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
7291  }
7292 
7293  if(hAllocation->IsPersistentMap())
7294  {
7295  pBlock->Unmap(m_hAllocator, 1);
7296  }
7297 
7298  pBlock->m_Metadata.Free(hAllocation);
7299  VMA_HEAVY_ASSERT(pBlock->Validate());
7300 
7301  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
7302 
7303  // pBlock became empty after this deallocation.
7304  if(pBlock->m_Metadata.IsEmpty())
7305  {
7306  // We already have an empty block - we don't want two, so delete this one.
7307  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
7308  {
7309  pBlockToDelete = pBlock;
7310  Remove(pBlock);
7311  }
7312  // We now have our first empty block.
7313  else
7314  {
7315  m_HasEmptyBlock = true;
7316  }
7317  }
7318  // pBlock didn't become empty, but we have another empty block - find and free that one.
7319  // (This is optional, heuristics.)
7320  else if(m_HasEmptyBlock)
7321  {
7322  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
7323  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
7324  {
7325  pBlockToDelete = pLastBlock;
7326  m_Blocks.pop_back();
7327  m_HasEmptyBlock = false;
7328  }
7329  }
7330 
7331  IncrementallySortBlocks();
7332  }
7333 
7334  // Destruction of a free block. Deferred until this point, outside of the mutex
7335  // lock, for performance reasons.
7336  if(pBlockToDelete != VMA_NULL)
7337  {
7338  VMA_DEBUG_LOG(" Deleted empty block");
7339  pBlockToDelete->Destroy(m_hAllocator);
7340  vma_delete(m_hAllocator, pBlockToDelete);
7341  }
7342 }
7343 
7344 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
7345 {
7346  VkDeviceSize result = 0;
7347  for(size_t i = m_Blocks.size(); i--; )
7348  {
7349  result = VMA_MAX(result, m_Blocks[i]->m_Metadata.GetSize());
7350  if(result >= m_PreferredBlockSize)
7351  {
7352  break;
7353  }
7354  }
7355  return result;
7356 }
7357 
7358 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
7359 {
7360  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7361  {
7362  if(m_Blocks[blockIndex] == pBlock)
7363  {
7364  VmaVectorRemove(m_Blocks, blockIndex);
7365  return;
7366  }
7367  }
7368  VMA_ASSERT(0);
7369 }
7370 
7371 void VmaBlockVector::IncrementallySortBlocks()
7372 {
7373  // Bubble sort only until first swap.
7374  for(size_t i = 1; i < m_Blocks.size(); ++i)
7375  {
7376  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
7377  {
7378  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
7379  return;
7380  }
7381  }
7382 }
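// Editor's note: a single swap per call suffices here because this runs after
// every Free(), so the ordering drifts only slightly between calls; repeated
// one-swap passes keep m_Blocks approximately sorted by ascending free space
// without paying for a full sort on this hot path.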
7383 
7384 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
7385 {
7386  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7387  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
7388  allocInfo.allocationSize = blockSize;
7389  VkDeviceMemory mem = VK_NULL_HANDLE;
7390  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
7391  if(res < 0)
7392  {
7393  return res;
7394  }
7395 
7396  // New VkDeviceMemory successfully created.
7397 
7398  // Create a new VmaDeviceMemoryBlock object for it.
7399  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
7400  pBlock->Init(
7401  m_MemoryTypeIndex,
7402  mem,
7403  allocInfo.allocationSize,
7404  m_NextBlockId++);
7405 
7406  m_Blocks.push_back(pBlock);
7407  if(pNewBlockIndex != VMA_NULL)
7408  {
7409  *pNewBlockIndex = m_Blocks.size() - 1;
7410  }
7411 
7412  return VK_SUCCESS;
7413 }
7414 
7415 #if VMA_STATS_STRING_ENABLED
7416 
7417 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
7418 {
7419  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7420 
7421  json.BeginObject();
7422 
7423  if(m_IsCustomPool)
7424  {
7425  json.WriteString("MemoryTypeIndex");
7426  json.WriteNumber(m_MemoryTypeIndex);
7427 
7428  json.WriteString("BlockSize");
7429  json.WriteNumber(m_PreferredBlockSize);
7430 
7431  json.WriteString("BlockCount");
7432  json.BeginObject(true);
7433  if(m_MinBlockCount > 0)
7434  {
7435  json.WriteString("Min");
7436  json.WriteNumber((uint64_t)m_MinBlockCount);
7437  }
7438  if(m_MaxBlockCount < SIZE_MAX)
7439  {
7440  json.WriteString("Max");
7441  json.WriteNumber((uint64_t)m_MaxBlockCount);
7442  }
7443  json.WriteString("Cur");
7444  json.WriteNumber((uint64_t)m_Blocks.size());
7445  json.EndObject();
7446 
7447  if(m_FrameInUseCount > 0)
7448  {
7449  json.WriteString("FrameInUseCount");
7450  json.WriteNumber(m_FrameInUseCount);
7451  }
7452  }
7453  else
7454  {
7455  json.WriteString("PreferredBlockSize");
7456  json.WriteNumber(m_PreferredBlockSize);
7457  }
7458 
7459  json.WriteString("Blocks");
7460  json.BeginObject();
7461  for(size_t i = 0; i < m_Blocks.size(); ++i)
7462  {
7463  json.BeginString();
7464  json.ContinueString(m_Blocks[i]->GetId());
7465  json.EndString();
7466 
7467  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
7468  }
7469  json.EndObject();
7470 
7471  json.EndObject();
7472 }
7473 
7474 #endif // #if VMA_STATS_STRING_ENABLED
7475 
7476 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
7477  VmaAllocator hAllocator,
7478  uint32_t currentFrameIndex)
7479 {
7480  if(m_pDefragmentator == VMA_NULL)
7481  {
7482  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
7483  hAllocator,
7484  this,
7485  currentFrameIndex);
7486  }
7487 
7488  return m_pDefragmentator;
7489 }
7490 
7491 VkResult VmaBlockVector::Defragment(
7492  VmaDefragmentationStats* pDefragmentationStats,
7493  VkDeviceSize& maxBytesToMove,
7494  uint32_t& maxAllocationsToMove)
7495 {
7496  if(m_pDefragmentator == VMA_NULL)
7497  {
7498  return VK_SUCCESS;
7499  }
7500 
7501  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7502 
7503  // Defragment.
7504  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
7505 
7506  // Accumulate statistics.
7507  if(pDefragmentationStats != VMA_NULL)
7508  {
7509  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
7510  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
7511  pDefragmentationStats->bytesMoved += bytesMoved;
7512  pDefragmentationStats->allocationsMoved += allocationsMoved;
7513  VMA_ASSERT(bytesMoved <= maxBytesToMove);
7514  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
7515  maxBytesToMove -= bytesMoved;
7516  maxAllocationsToMove -= allocationsMoved;
7517  }
7518 
7519  // Free empty blocks.
7520  m_HasEmptyBlock = false;
7521  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
7522  {
7523  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
7524  if(pBlock->m_Metadata.IsEmpty())
7525  {
7526  if(m_Blocks.size() > m_MinBlockCount)
7527  {
7528  if(pDefragmentationStats != VMA_NULL)
7529  {
7530  ++pDefragmentationStats->deviceMemoryBlocksFreed;
7531  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
7532  }
7533 
7534  VmaVectorRemove(m_Blocks, blockIndex);
7535  pBlock->Destroy(m_hAllocator);
7536  vma_delete(m_hAllocator, pBlock);
7537  }
7538  else
7539  {
7540  m_HasEmptyBlock = true;
7541  }
7542  }
7543  }
7544 
7545  return result;
7546 }
7547 
7548 void VmaBlockVector::DestroyDefragmentator()
7549 {
7550  if(m_pDefragmentator != VMA_NULL)
7551  {
7552  vma_delete(m_hAllocator, m_pDefragmentator);
7553  m_pDefragmentator = VMA_NULL;
7554  }
7555 }
7556 
7557 void VmaBlockVector::MakePoolAllocationsLost(
7558  uint32_t currentFrameIndex,
7559  size_t* pLostAllocationCount)
7560 {
7561  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7562  size_t lostAllocationCount = 0;
7563  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7564  {
7565  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7566  VMA_ASSERT(pBlock);
7567  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
7568  }
7569  if(pLostAllocationCount != VMA_NULL)
7570  {
7571  *pLostAllocationCount = lostAllocationCount;
7572  }
7573 }
7574 
7575 VkResult VmaBlockVector::CheckCorruption()
7576 {
7577  if(!IsCorruptionDetectionEnabled())
7578  {
7579  return VK_ERROR_FEATURE_NOT_PRESENT;
7580  }
7581 
7582  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7583  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7584  {
7585  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7586  VMA_ASSERT(pBlock);
7587  VkResult res = pBlock->CheckCorruption(m_hAllocator);
7588  if(res != VK_SUCCESS)
7589  {
7590  return res;
7591  }
7592  }
7593  return VK_SUCCESS;
7594 }
7595 
7596 void VmaBlockVector::AddStats(VmaStats* pStats)
7597 {
7598  const uint32_t memTypeIndex = m_MemoryTypeIndex;
7599  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
7600 
7601  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7602 
7603  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7604  {
7605  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7606  VMA_ASSERT(pBlock);
7607  VMA_HEAVY_ASSERT(pBlock->Validate());
7608  VmaStatInfo allocationStatInfo;
7609  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
7610  VmaAddStatInfo(pStats->total, allocationStatInfo);
7611  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7612  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7613  }
7614 }
7615 
7617 // VmaDefragmentator members definition
7618 
7619 VmaDefragmentator::VmaDefragmentator(
7620  VmaAllocator hAllocator,
7621  VmaBlockVector* pBlockVector,
7622  uint32_t currentFrameIndex) :
7623  m_hAllocator(hAllocator),
7624  m_pBlockVector(pBlockVector),
7625  m_CurrentFrameIndex(currentFrameIndex),
7626  m_BytesMoved(0),
7627  m_AllocationsMoved(0),
7628  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
7629  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
7630 {
7631 }
7632 
7633 VmaDefragmentator::~VmaDefragmentator()
7634 {
7635  for(size_t i = m_Blocks.size(); i--; )
7636  {
7637  vma_delete(m_hAllocator, m_Blocks[i]);
7638  }
7639 }
7640 
7641 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
7642 {
7643  AllocationInfo allocInfo;
7644  allocInfo.m_hAllocation = hAlloc;
7645  allocInfo.m_pChanged = pChanged;
7646  m_Allocations.push_back(allocInfo);
7647 }
7648 
7649 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
7650 {
7651  // It has already been mapped for defragmentation.
7652  if(m_pMappedDataForDefragmentation)
7653  {
7654  *ppMappedData = m_pMappedDataForDefragmentation;
7655  return VK_SUCCESS;
7656  }
7657 
7658  // The block is already mapped outside of defragmentation (e.g. persistently).
7659  if(m_pBlock->GetMappedData())
7660  {
7661  *ppMappedData = m_pBlock->GetMappedData();
7662  return VK_SUCCESS;
7663  }
7664 
7665  // Map on first usage.
7666  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
7667  *ppMappedData = m_pMappedDataForDefragmentation;
7668  return res;
7669 }
7670 
7671 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
7672 {
7673  if(m_pMappedDataForDefragmentation != VMA_NULL)
7674  {
7675  m_pBlock->Unmap(hAllocator, 1);
7676  }
7677 }
7678 
7679 VkResult VmaDefragmentator::DefragmentRound(
7680  VkDeviceSize maxBytesToMove,
7681  uint32_t maxAllocationsToMove)
7682 {
7683  if(m_Blocks.empty())
7684  {
7685  return VK_SUCCESS;
7686  }
7687 
7688  size_t srcBlockIndex = m_Blocks.size() - 1;
7689  size_t srcAllocIndex = SIZE_MAX;
7690  for(;;)
7691  {
7692  // 1. Find next allocation to move.
7693  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
7694  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
7695  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
7696  {
7697  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
7698  {
7699  // Finished: no more allocations to process.
7700  if(srcBlockIndex == 0)
7701  {
7702  return VK_SUCCESS;
7703  }
7704  else
7705  {
7706  --srcBlockIndex;
7707  srcAllocIndex = SIZE_MAX;
7708  }
7709  }
7710  else
7711  {
7712  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
7713  }
7714  }
7715 
7716  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
7717  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
7718 
7719  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
7720  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
7721  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
7722  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
7723 
7724  // 2. Try to find new place for this allocation in preceding or current block.
7725  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
7726  {
7727  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
7728  VmaAllocationRequest dstAllocRequest;
7729  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
7730  m_CurrentFrameIndex,
7731  m_pBlockVector->GetFrameInUseCount(),
7732  m_pBlockVector->GetBufferImageGranularity(),
7733  size,
7734  alignment,
7735  suballocType,
7736  false, // canMakeOtherLost
7737  &dstAllocRequest) &&
7738  MoveMakesSense(
7739  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
7740  {
7741  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
7742 
7743  // Reached limit on number of allocations or bytes to move.
7744  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
7745  (m_BytesMoved + size > maxBytesToMove))
7746  {
7747  return VK_INCOMPLETE;
7748  }
7749 
7750  void* pDstMappedData = VMA_NULL;
7751  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
7752  if(res != VK_SUCCESS)
7753  {
7754  return res;
7755  }
7756 
7757  void* pSrcMappedData = VMA_NULL;
7758  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7759  if(res != VK_SUCCESS)
7760  {
7761  return res;
7762  }
7763 
7764  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7765  memcpy(
7766  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7767  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7768  static_cast<size_t>(size));
7769 
7770  if(VMA_DEBUG_MARGIN > 0)
7771  {
7772  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
7773  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
7774  }
7775 
7776  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7777  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7778 
7779  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7780 
7781  if(allocInfo.m_pChanged != VMA_NULL)
7782  {
7783  *allocInfo.m_pChanged = VK_TRUE;
7784  }
7785 
7786  ++m_AllocationsMoved;
7787  m_BytesMoved += size;
7788 
7789  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7790 
7791  break;
7792  }
7793  }
7794 
7795  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7796 
7797  if(srcAllocIndex > 0)
7798  {
7799  --srcAllocIndex;
7800  }
7801  else
7802  {
7803  if(srcBlockIndex > 0)
7804  {
7805  --srcBlockIndex;
7806  srcAllocIndex = SIZE_MAX;
7807  }
7808  else
7809  {
7810  return VK_SUCCESS;
7811  }
7812  }
7813  }
7814 }
7815 
7816 VkResult VmaDefragmentator::Defragment(
7817  VkDeviceSize maxBytesToMove,
7818  uint32_t maxAllocationsToMove)
7819 {
7820  if(m_Allocations.empty())
7821  {
7822  return VK_SUCCESS;
7823  }
7824 
7825  // Create block info for each block.
7826  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7827  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7828  {
7829  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7830  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7831  m_Blocks.push_back(pBlockInfo);
7832  }
7833 
7834  // Sort them by m_pBlock pointer value.
7835  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7836 
7837  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i]->m_Allocations.
7838  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
7839  {
7840  AllocationInfo& allocInfo = m_Allocations[allocIndex];
7841  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
7842  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7843  {
7844  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7845  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7846  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7847  {
7848  (*it)->m_Allocations.push_back(allocInfo);
7849  }
7850  else
7851  {
7852  VMA_ASSERT(0);
7853  }
7854  }
7855  }
7856  m_Allocations.clear();
7857 
7858  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7859  {
7860  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7861  pBlockInfo->CalcHasNonMovableAllocations();
7862  pBlockInfo->SortAllocationsBySizeDescecnding();
7863  }
7864 
7865  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7866  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7867 
7868  // Execute defragmentation rounds (the main part).
7869  VkResult result = VK_SUCCESS;
7870  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7871  {
7872  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7873  }
7874 
7875  // Unmap blocks that were mapped for defragmentation.
7876  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7877  {
7878  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7879  }
7880 
7881  return result;
7882 }
7883 
7884 bool VmaDefragmentator::MoveMakesSense(
7885  size_t dstBlockIndex, VkDeviceSize dstOffset,
7886  size_t srcBlockIndex, VkDeviceSize srcOffset)
7887 {
7888  if(dstBlockIndex < srcBlockIndex)
7889  {
7890  return true;
7891  }
7892  if(dstBlockIndex > srcBlockIndex)
7893  {
7894  return false;
7895  }
7896  if(dstOffset < srcOffset)
7897  {
7898  return true;
7899  }
7900  return false;
7901 }
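// Illustrative note (not part of the library source): MoveMakesSense() is a
// lexicographic comparison on (blockIndex, offset), so data only ever moves
// toward the front of the sorted block list. A minimal sketch of the rule,
// with hypothetical values:
/*
    // (dstBlockIndex, dstOffset) vs (srcBlockIndex, srcOffset)
    MoveMakesSense(0, 4096, 2,  0);   // true:  an earlier block always wins
    MoveMakesSense(2, 0,    0, 4096); // false: never move to a later block
    MoveMakesSense(1, 256,  1, 512);  // true:  same block, lower offset
    MoveMakesSense(1, 512,  1, 256);  // false: same block, higher offset
*/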
7902 
7903 ////////////////////////////////////////////////////////////////////////////////
7904 // VmaAllocator_T
7905 
7906 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7907  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7908  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7909  m_hDevice(pCreateInfo->device),
7910  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7911  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7912  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7913  m_PreferredLargeHeapBlockSize(0),
7914  m_PhysicalDevice(pCreateInfo->physicalDevice),
7915  m_CurrentFrameIndex(0),
7916  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
7917  m_NextPoolId(0)
7918 {
7919  if(VMA_DEBUG_DETECT_CORRUPTION)
7920  {
7921  // Needs to be a multiple of the size of uint32_t because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
7922  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
7923  }
7924 
7925  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7926 
7927 #if !(VMA_DEDICATED_ALLOCATION)
7928  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
7929  {
7930  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
7931  }
7932 #endif
7933 
7934  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7935  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7936  memset(&m_MemProps, 0, sizeof(m_MemProps));
7937 
7938  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7939  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7940 
7941  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7942  {
7943  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7944  }
7945 
7946  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7947  {
7948  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7949  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7950  }
7951 
7952  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7953 
7954  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7955  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7956 
7957  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7958  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7959 
7960  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7961  {
7962  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7963  {
7964  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7965  if(limit != VK_WHOLE_SIZE)
7966  {
7967  m_HeapSizeLimit[heapIndex] = limit;
7968  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7969  {
7970  m_MemProps.memoryHeaps[heapIndex].size = limit;
7971  }
7972  }
7973  }
7974  }
7975 
7976  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7977  {
7978  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7979 
7980  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7981  this,
7982  memTypeIndex,
7983  preferredBlockSize,
7984  0,
7985  SIZE_MAX,
7986  GetBufferImageGranularity(),
7987  pCreateInfo->frameInUseCount,
7988  false); // isCustomPool
7989  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7990  // because minBlockCount is 0.
7991  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7992 
7993  }
7994 }
7995 
7996 VmaAllocator_T::~VmaAllocator_T()
7997 {
7998  VMA_ASSERT(m_Pools.empty());
7999 
8000  for(size_t i = GetMemoryTypeCount(); i--; )
8001  {
8002  vma_delete(this, m_pDedicatedAllocations[i]);
8003  vma_delete(this, m_pBlockVectors[i]);
8004  }
8005 }
8006 
8007 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
8008 {
8009 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8010  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
8011  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
8012  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
8013  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
8014  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
8015  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
8016  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
8017  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
8018  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
8019  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
8020  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
8021  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
8022  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
8023  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
8024  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
8025  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
8026 #if VMA_DEDICATED_ALLOCATION
8027  if(m_UseKhrDedicatedAllocation)
8028  {
8029  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
8030  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
8031  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
8032  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
8033  }
8034 #endif // #if VMA_DEDICATED_ALLOCATION
8035 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8036 
8037 #define VMA_COPY_IF_NOT_NULL(funcName) \
8038  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
8039 
8040  if(pVulkanFunctions != VMA_NULL)
8041  {
8042  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
8043  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
8044  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
8045  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
8046  VMA_COPY_IF_NOT_NULL(vkMapMemory);
8047  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
8048  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
8049  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
8050  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
8051  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
8052  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
8053  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
8054  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
8055  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
8056  VMA_COPY_IF_NOT_NULL(vkCreateImage);
8057  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
8058 #if VMA_DEDICATED_ALLOCATION
8059  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
8060  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
8061 #endif
8062  }
8063 
8064 #undef VMA_COPY_IF_NOT_NULL
8065 
8066  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
8067  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
8068  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
8069  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
8070  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
8071  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
8072  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
8073  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
8074  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
8075  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
8076  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
8077  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
8078  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
8079  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
8080  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
8081  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
8082  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
8083  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
8084 #if VMA_DEDICATED_ALLOCATION
8085  if(m_UseKhrDedicatedAllocation)
8086  {
8087  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
8088  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
8089  }
8090 #endif
8091 }
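// Usage sketch (an illustration, not library code): when VMA_STATIC_VULKAN_FUNCTIONS
// is 0, the application fills VmaVulkanFunctions itself, e.g. with pointers
// obtained through its own loader. `physicalDevice` and `device` are assumed to
// exist in the caller's code.
/*
    VmaVulkanFunctions funcs = {};
    funcs.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    funcs.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    funcs.vkAllocateMemory = vkAllocateMemory;
    // ... fill all remaining members; the asserts above list the full set ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &funcs;
*/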
8092 
8093 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
8094 {
8095  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
8096  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
8097  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
8098  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
8099 }
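// Worked example (assuming the library defaults defined earlier in this file:
// VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
/*
    heapSize = 512 MiB -> small heap -> preferred block size = 512/8 =  64 MiB
    heapSize =   8 GiB -> large heap -> preferred block size =         256 MiB
*/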
8100 
8101 VkResult VmaAllocator_T::AllocateMemoryOfType(
8102  VkDeviceSize size,
8103  VkDeviceSize alignment,
8104  bool dedicatedAllocation,
8105  VkBuffer dedicatedBuffer,
8106  VkImage dedicatedImage,
8107  const VmaAllocationCreateInfo& createInfo,
8108  uint32_t memTypeIndex,
8109  VmaSuballocationType suballocType,
8110  VmaAllocation* pAllocation)
8111 {
8112  VMA_ASSERT(pAllocation != VMA_NULL);
8113  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
8114 
8115  VmaAllocationCreateInfo finalCreateInfo = createInfo;
8116 
8117  // If memory type is not HOST_VISIBLE, disable MAPPED.
8118  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
8119  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
8120  {
8121  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
8122  }
8123 
8124  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
8125  VMA_ASSERT(blockVector);
8126 
8127  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
8128  bool preferDedicatedMemory =
8129  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
8130  dedicatedAllocation ||
8131  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
8132  size > preferredBlockSize / 2;
8133 
8134  if(preferDedicatedMemory &&
8135  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
8136  finalCreateInfo.pool == VK_NULL_HANDLE)
8137  {
8138  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
8139  }
8140 
8141  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
8142  {
8143  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
8144  {
8145  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8146  }
8147  else
8148  {
8149  return AllocateDedicatedMemory(
8150  size,
8151  suballocType,
8152  memTypeIndex,
8153  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
8154  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
8155  finalCreateInfo.pUserData,
8156  dedicatedBuffer,
8157  dedicatedImage,
8158  pAllocation);
8159  }
8160  }
8161  else
8162  {
8163  VkResult res = blockVector->Allocate(
8164  VK_NULL_HANDLE, // hCurrentPool
8165  m_CurrentFrameIndex.load(),
8166  size,
8167  alignment,
8168  finalCreateInfo,
8169  suballocType,
8170  pAllocation);
8171  if(res == VK_SUCCESS)
8172  {
8173  return res;
8174  }
8175 
8176  // Block allocation failed. Try dedicated memory.
8177  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
8178  {
8179  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8180  }
8181  else
8182  {
8183  res = AllocateDedicatedMemory(
8184  size,
8185  suballocType,
8186  memTypeIndex,
8187  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
8188  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
8189  finalCreateInfo.pUserData,
8190  dedicatedBuffer,
8191  dedicatedImage,
8192  pAllocation);
8193  if(res == VK_SUCCESS)
8194  {
8195  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
8196  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
8197  return VK_SUCCESS;
8198  }
8199  else
8200  {
8201  // Everything failed: Return error code.
8202  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
8203  return res;
8204  }
8205  }
8206  }
8207 }
8208 
8209 VkResult VmaAllocator_T::AllocateDedicatedMemory(
8210  VkDeviceSize size,
8211  VmaSuballocationType suballocType,
8212  uint32_t memTypeIndex,
8213  bool map,
8214  bool isUserDataString,
8215  void* pUserData,
8216  VkBuffer dedicatedBuffer,
8217  VkImage dedicatedImage,
8218  VmaAllocation* pAllocation)
8219 {
8220  VMA_ASSERT(pAllocation);
8221 
8222  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
8223  allocInfo.memoryTypeIndex = memTypeIndex;
8224  allocInfo.allocationSize = size;
8225 
8226 #if VMA_DEDICATED_ALLOCATION
8227  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
8228  if(m_UseKhrDedicatedAllocation)
8229  {
8230  if(dedicatedBuffer != VK_NULL_HANDLE)
8231  {
8232  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
8233  dedicatedAllocInfo.buffer = dedicatedBuffer;
8234  allocInfo.pNext = &dedicatedAllocInfo;
8235  }
8236  else if(dedicatedImage != VK_NULL_HANDLE)
8237  {
8238  dedicatedAllocInfo.image = dedicatedImage;
8239  allocInfo.pNext = &dedicatedAllocInfo;
8240  }
8241  }
8242 #endif // #if VMA_DEDICATED_ALLOCATION
8243 
8244  // Allocate VkDeviceMemory.
8245  VkDeviceMemory hMemory = VK_NULL_HANDLE;
8246  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
8247  if(res < 0)
8248  {
8249  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
8250  return res;
8251  }
8252 
8253  void* pMappedData = VMA_NULL;
8254  if(map)
8255  {
8256  res = (*m_VulkanFunctions.vkMapMemory)(
8257  m_hDevice,
8258  hMemory,
8259  0,
8260  VK_WHOLE_SIZE,
8261  0,
8262  &pMappedData);
8263  if(res < 0)
8264  {
8265  VMA_DEBUG_LOG(" vkMapMemory FAILED");
8266  FreeVulkanMemory(memTypeIndex, size, hMemory);
8267  return res;
8268  }
8269  }
8270 
8271  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
8272  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
8273  (*pAllocation)->SetUserData(this, pUserData);
8274  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
8275  {
8276  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
8277  }
8278 
8279  // Register it in m_pDedicatedAllocations.
8280  {
8281  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8282  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8283  VMA_ASSERT(pDedicatedAllocations);
8284  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
8285  }
8286 
8287  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
8288 
8289  return VK_SUCCESS;
8290 }
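// Usage sketch (illustrative, not part of the library): forcing this path from
// the public API by requesting dedicated memory for a buffer. `allocator` is
// assumed to have been created by the caller.
/*
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 64ull * 1024 * 1024;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
*/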
8291 
8292 void VmaAllocator_T::GetBufferMemoryRequirements(
8293  VkBuffer hBuffer,
8294  VkMemoryRequirements& memReq,
8295  bool& requiresDedicatedAllocation,
8296  bool& prefersDedicatedAllocation) const
8297 {
8298 #if VMA_DEDICATED_ALLOCATION
8299  if(m_UseKhrDedicatedAllocation)
8300  {
8301  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
8302  memReqInfo.buffer = hBuffer;
8303 
8304  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
8305 
8306  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
8307  memReq2.pNext = &memDedicatedReq;
8308 
8309  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
8310 
8311  memReq = memReq2.memoryRequirements;
8312  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
8313  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
8314  }
8315  else
8316 #endif // #if VMA_DEDICATED_ALLOCATION
8317  {
8318  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
8319  requiresDedicatedAllocation = false;
8320  prefersDedicatedAllocation = false;
8321  }
8322 }
8323 
8324 void VmaAllocator_T::GetImageMemoryRequirements(
8325  VkImage hImage,
8326  VkMemoryRequirements& memReq,
8327  bool& requiresDedicatedAllocation,
8328  bool& prefersDedicatedAllocation) const
8329 {
8330 #if VMA_DEDICATED_ALLOCATION
8331  if(m_UseKhrDedicatedAllocation)
8332  {
8333  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
8334  memReqInfo.image = hImage;
8335 
8336  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
8337 
8338  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
8339  memReq2.pNext = &memDedicatedReq;
8340 
8341  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
8342 
8343  memReq = memReq2.memoryRequirements;
8344  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
8345  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
8346  }
8347  else
8348 #endif // #if VMA_DEDICATED_ALLOCATION
8349  {
8350  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
8351  requiresDedicatedAllocation = false;
8352  prefersDedicatedAllocation = false;
8353  }
8354 }
8355 
8356 VkResult VmaAllocator_T::AllocateMemory(
8357  const VkMemoryRequirements& vkMemReq,
8358  bool requiresDedicatedAllocation,
8359  bool prefersDedicatedAllocation,
8360  VkBuffer dedicatedBuffer,
8361  VkImage dedicatedImage,
8362  const VmaAllocationCreateInfo& createInfo,
8363  VmaSuballocationType suballocType,
8364  VmaAllocation* pAllocation)
8365 {
8366  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
8367  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
8368  {
8369  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
8370  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8371  }
8372  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
8373  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
8374  {
8375  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
8376  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8377  }
8378  if(requiresDedicatedAllocation)
8379  {
8380  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
8381  {
8382  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
8383  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8384  }
8385  if(createInfo.pool != VK_NULL_HANDLE)
8386  {
8387  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
8388  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8389  }
8390  }
8391  if((createInfo.pool != VK_NULL_HANDLE) &&
8392  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
8393  {
8394  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
8395  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8396  }
8397 
8398  if(createInfo.pool != VK_NULL_HANDLE)
8399  {
8400  const VkDeviceSize alignmentForPool = VMA_MAX(
8401  vkMemReq.alignment,
8402  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
8403  return createInfo.pool->m_BlockVector.Allocate(
8404  createInfo.pool,
8405  m_CurrentFrameIndex.load(),
8406  vkMemReq.size,
8407  alignmentForPool,
8408  createInfo,
8409  suballocType,
8410  pAllocation);
8411  }
8412  else
8413  {
8414  // Bit mask of Vulkan memory types acceptable for this allocation.
8415  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
8416  uint32_t memTypeIndex = UINT32_MAX;
8417  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
8418  if(res == VK_SUCCESS)
8419  {
8420  VkDeviceSize alignmentForMemType = VMA_MAX(
8421  vkMemReq.alignment,
8422  GetMemoryTypeMinAlignment(memTypeIndex));
8423 
8424  res = AllocateMemoryOfType(
8425  vkMemReq.size,
8426  alignmentForMemType,
8427  requiresDedicatedAllocation || prefersDedicatedAllocation,
8428  dedicatedBuffer,
8429  dedicatedImage,
8430  createInfo,
8431  memTypeIndex,
8432  suballocType,
8433  pAllocation);
8434  // Succeeded on first try.
8435  if(res == VK_SUCCESS)
8436  {
8437  return res;
8438  }
8439  // Allocation from this memory type failed. Try other compatible memory types.
8440  else
8441  {
8442  for(;;)
8443  {
8444  // Remove old memTypeIndex from list of possibilities.
8445  memoryTypeBits &= ~(1u << memTypeIndex);
8446  // Find alternative memTypeIndex.
8447  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
8448  if(res == VK_SUCCESS)
8449  {
8450  alignmentForMemType = VMA_MAX(
8451  vkMemReq.alignment,
8452  GetMemoryTypeMinAlignment(memTypeIndex));
8453 
8454  res = AllocateMemoryOfType(
8455  vkMemReq.size,
8456  alignmentForMemType,
8457  requiresDedicatedAllocation || prefersDedicatedAllocation,
8458  dedicatedBuffer,
8459  dedicatedImage,
8460  createInfo,
8461  memTypeIndex,
8462  suballocType,
8463  pAllocation);
8464  // Allocation from this alternative memory type succeeded.
8465  if(res == VK_SUCCESS)
8466  {
8467  return res;
8468  }
8469  // else: Allocation from this memory type failed. Try next one - next loop iteration.
8470  }
8471  // No other matching memory type index could be found.
8472  else
8473  {
8474  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
8475  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8476  }
8477  }
8478  }
8479  }
8480  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
8481  else
8482  return res;
8483  }
8484 }
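// Usage sketch (illustrative): the fallback loop above means a single call to
// the public vmaAllocateMemory() may try several compatible memory types before
// giving up. `device` and `buffer` are assumed to come from the caller.
/*
    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, VMA_NULL);
    // res == VK_ERROR_OUT_OF_DEVICE_MEMORY only after every candidate type failed.
*/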
8485 
8486 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
8487 {
8488  VMA_ASSERT(allocation);
8489 
8490  if(allocation->CanBecomeLost() == false ||
8491  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
8492  {
8493  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
8494  {
8495  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
8496  }
8497 
8498  switch(allocation->GetType())
8499  {
8500  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8501  {
8502  VmaBlockVector* pBlockVector = VMA_NULL;
8503  VmaPool hPool = allocation->GetPool();
8504  if(hPool != VK_NULL_HANDLE)
8505  {
8506  pBlockVector = &hPool->m_BlockVector;
8507  }
8508  else
8509  {
8510  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8511  pBlockVector = m_pBlockVectors[memTypeIndex];
8512  }
8513  pBlockVector->Free(allocation);
8514  }
8515  break;
8516  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8517  FreeDedicatedMemory(allocation);
8518  break;
8519  default:
8520  VMA_ASSERT(0);
8521  }
8522  }
8523 
8524  allocation->SetUserData(this, VMA_NULL);
8525  vma_delete(this, allocation);
8526 }
8527 
8528 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
8529 {
8530  // Initialize.
8531  InitStatInfo(pStats->total);
8532  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
8533  InitStatInfo(pStats->memoryType[i]);
8534  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
8535  InitStatInfo(pStats->memoryHeap[i]);
8536 
8537  // Process default pools.
8538  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8539  {
8540  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
8541  VMA_ASSERT(pBlockVector);
8542  pBlockVector->AddStats(pStats);
8543  }
8544 
8545  // Process custom pools.
8546  {
8547  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8548  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
8549  {
8550  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
8551  }
8552  }
8553 
8554  // Process dedicated allocations.
8555  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8556  {
8557  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
8558  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8559  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8560  VMA_ASSERT(pDedicatedAllocVector);
8561  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
8562  {
8563  VmaStatInfo allocationStatInfo;
8564  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
8565  VmaAddStatInfo(pStats->total, allocationStatInfo);
8566  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
8567  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
8568  }
8569  }
8570 
8571  // Postprocess.
8572  VmaPostprocessCalcStatInfo(pStats->total);
8573  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
8574  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
8575  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
8576  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
8577 }
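// Usage sketch (illustrative): reading the aggregated statistics through the
// public API. Field names follow VmaStats/VmaStatInfo as declared earlier in
// this file.
/*
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B in %u allocations, unused: %llu B\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount,
        (unsigned long long)stats.total.unusedBytes);
*/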
8578 
8579 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
8580 
8581 VkResult VmaAllocator_T::Defragment(
8582  VmaAllocation* pAllocations,
8583  size_t allocationCount,
8584  VkBool32* pAllocationsChanged,
8585  const VmaDefragmentationInfo* pDefragmentationInfo,
8586  VmaDefragmentationStats* pDefragmentationStats)
8587 {
8588  if(pAllocationsChanged != VMA_NULL)
8589  {
8590  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
8591  }
8592  if(pDefragmentationStats != VMA_NULL)
8593  {
8594  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
8595  }
8596 
8597  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
8598 
8599  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
8600 
8601  const size_t poolCount = m_Pools.size();
8602 
8603  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
8604  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8605  {
8606  VmaAllocation hAlloc = pAllocations[allocIndex];
8607  VMA_ASSERT(hAlloc);
8608  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
8609  // DedicatedAlloc cannot be defragmented.
8610  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
8611  // Only HOST_VISIBLE memory types can be defragmented.
8612  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
8613  // Lost allocation cannot be defragmented.
8614  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
8615  {
8616  VmaBlockVector* pAllocBlockVector = VMA_NULL;
8617 
8618  const VmaPool hAllocPool = hAlloc->GetPool();
8619  // This allocation belongs to a custom pool.
8620  if(hAllocPool != VK_NULL_HANDLE)
8621  {
8622  pAllocBlockVector = &hAllocPool->GetBlockVector();
8623  }
8624  // This allocation belongs to the general pool.
8625  else
8626  {
8627  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
8628  }
8629 
8630  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
8631 
8632  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
8633  &pAllocationsChanged[allocIndex] : VMA_NULL;
8634  pDefragmentator->AddAllocation(hAlloc, pChanged);
8635  }
8636  }
8637 
8638  VkResult result = VK_SUCCESS;
8639 
8640  // ======== Main processing.
8641 
8642  VkDeviceSize maxBytesToMove = UINT64_MAX; // VkDeviceSize is 64-bit; SIZE_MAX would truncate on 32-bit builds.
8643  uint32_t maxAllocationsToMove = UINT32_MAX;
8644  if(pDefragmentationInfo != VMA_NULL)
8645  {
8646  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
8647  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
8648  }
8649 
8650  // Process standard memory.
8651  for(uint32_t memTypeIndex = 0;
8652  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
8653  ++memTypeIndex)
8654  {
8655  // Only HOST_VISIBLE memory types can be defragmented.
8656  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8657  {
8658  result = m_pBlockVectors[memTypeIndex]->Defragment(
8659  pDefragmentationStats,
8660  maxBytesToMove,
8661  maxAllocationsToMove);
8662  }
8663  }
8664 
8665  // Process custom pools.
8666  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
8667  {
8668  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
8669  pDefragmentationStats,
8670  maxBytesToMove,
8671  maxAllocationsToMove);
8672  }
8673 
8674  // ======== Destroy defragmentators.
8675 
8676  // Process custom pools.
8677  for(size_t poolIndex = poolCount; poolIndex--; )
8678  {
8679  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
8680  }
8681 
8682  // Process standard memory.
8683  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
8684  {
8685  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8686  {
8687  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
8688  }
8689  }
8690 
8691  return result;
8692 }
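// Usage sketch (illustrative): driving this routine through the public
// vmaDefragment() entry point. `allocs` is assumed to be an array of
// ALLOC_COUNT HOST_VISIBLE block allocations owned by the caller.
/*
    VkBool32 changed[ALLOC_COUNT] = {};
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxAllocationsToMove = UINT32_MAX; // no count limit

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocs, ALLOC_COUNT, changed, &defragInfo, &stats);
    // For each i where changed[i] == VK_TRUE, the allocation moved: the caller
    // must recreate or re-bind buffers/images that used it.
*/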
8693 
8694 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
8695 {
8696  if(hAllocation->CanBecomeLost())
8697  {
8698  /*
8699  Warning: This is a carefully designed algorithm.
8700  Do not modify unless you really know what you're doing :)
8701  */
8702  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8703  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8704  for(;;)
8705  {
8706  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8707  {
8708  pAllocationInfo->memoryType = UINT32_MAX;
8709  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
8710  pAllocationInfo->offset = 0;
8711  pAllocationInfo->size = hAllocation->GetSize();
8712  pAllocationInfo->pMappedData = VMA_NULL;
8713  pAllocationInfo->pUserData = hAllocation->GetUserData();
8714  return;
8715  }
8716  else if(localLastUseFrameIndex == localCurrFrameIndex)
8717  {
8718  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8719  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8720  pAllocationInfo->offset = hAllocation->GetOffset();
8721  pAllocationInfo->size = hAllocation->GetSize();
8722  pAllocationInfo->pMappedData = VMA_NULL;
8723  pAllocationInfo->pUserData = hAllocation->GetUserData();
8724  return;
8725  }
8726  else // Last use time earlier than current time.
8727  {
8728  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8729  {
8730  localLastUseFrameIndex = localCurrFrameIndex;
8731  }
8732  }
8733  }
8734  }
8735  else
8736  {
8737 #if VMA_STATS_STRING_ENABLED
8738  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8739  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8740  for(;;)
8741  {
8742  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
8743  if(localLastUseFrameIndex == localCurrFrameIndex)
8744  {
8745  break;
8746  }
8747  else // Last use time earlier than current time.
8748  {
8749  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8750  {
8751  localLastUseFrameIndex = localCurrFrameIndex;
8752  }
8753  }
8754  }
8755 #endif
8756 
8757  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8758  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8759  pAllocationInfo->offset = hAllocation->GetOffset();
8760  pAllocationInfo->size = hAllocation->GetSize();
8761  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
8762  pAllocationInfo->pUserData = hAllocation->GetUserData();
8763  }
8764 }
8765 
8766 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
8767 {
8768  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
8769  if(hAllocation->CanBecomeLost())
8770  {
8771  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8772  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8773  for(;;)
8774  {
8775  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8776  {
8777  return false;
8778  }
8779  else if(localLastUseFrameIndex == localCurrFrameIndex)
8780  {
8781  return true;
8782  }
8783  else // Last use time earlier than current time.
8784  {
8785  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8786  {
8787  localLastUseFrameIndex = localCurrFrameIndex;
8788  }
8789  }
8790  }
8791  }
8792  else
8793  {
8794 #if VMA_STATS_STRING_ENABLED
8795  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8796  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8797  for(;;)
8798  {
8799  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
8800  if(localLastUseFrameIndex == localCurrFrameIndex)
8801  {
8802  break;
8803  }
8804  else // Last use time earlier than current time.
8805  {
8806  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8807  {
8808  localLastUseFrameIndex = localCurrFrameIndex;
8809  }
8810  }
8811  }
8812 #endif
8813 
8814  return true;
8815  }
8816 }
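// Usage sketch (illustrative): with allocations created using
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, the caller touches them each
// frame; a VK_FALSE result means the allocation is lost and must be recreated.
/*
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Lost: destroy the old buffer, then allocate and fill a new one.
    }
*/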
8817 
8818 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8819 {
8820  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8821 
8822  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8823 
8824  if(newCreateInfo.maxBlockCount == 0)
8825  {
8826  newCreateInfo.maxBlockCount = SIZE_MAX;
8827  }
8828  if(newCreateInfo.blockSize == 0)
8829  {
8830  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8831  }
8832 
8833  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8834 
8835  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8836  if(res != VK_SUCCESS)
8837  {
8838  vma_delete(this, *pPool);
8839  *pPool = VMA_NULL;
8840  return res;
8841  }
8842 
8843  // Add to m_Pools.
8844  {
8845  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8846  (*pPool)->SetId(m_NextPoolId++);
8847  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8848  }
8849 
8850  return VK_SUCCESS;
8851 }
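// Usage sketch (illustrative): creating a custom pool through the public API.
// `memTypeIndex` would typically come from vmaFindMemoryTypeIndex().
/*
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 0;     // 0 = let CalcPreferredBlockSize() decide, as above
    poolInfo.maxBlockCount = 0; // 0 = unlimited (SIZE_MAX), as handled above

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate from it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/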
8852 
8853 void VmaAllocator_T::DestroyPool(VmaPool pool)
8854 {
8855  // Remove from m_Pools.
8856  {
8857  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8858  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8859  VMA_ASSERT(success && "Pool not found in Allocator.");
8860  }
8861 
8862  vma_delete(this, pool);
8863 }
8864 
8865 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8866 {
8867  pool->m_BlockVector.GetPoolStats(pPoolStats);
8868 }
8869 
8870 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8871 {
8872  m_CurrentFrameIndex.store(frameIndex);
8873 }
8874 
8875 void VmaAllocator_T::MakePoolAllocationsLost(
8876  VmaPool hPool,
8877  size_t* pLostAllocationCount)
8878 {
8879  hPool->m_BlockVector.MakePoolAllocationsLost(
8880  m_CurrentFrameIndex.load(),
8881  pLostAllocationCount);
8882 }
8883 
8884 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
8885 {
8886  return hPool->m_BlockVector.CheckCorruption();
8887 }
8888 
8889 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
8890 {
8891  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
8892 
8893  // Process default pools.
8894  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8895  {
8896  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
8897  {
8898  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
8899  VMA_ASSERT(pBlockVector);
8900  VkResult localRes = pBlockVector->CheckCorruption();
8901  switch(localRes)
8902  {
8903  case VK_ERROR_FEATURE_NOT_PRESENT:
8904  break;
8905  case VK_SUCCESS:
8906  finalRes = VK_SUCCESS;
8907  break;
8908  default:
8909  return localRes;
8910  }
8911  }
8912  }
8913 
8914  // Process custom pools.
8915  {
8916  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8917  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
8918  {
8919  if(((1u << m_Pools[poolIndex]->GetBlockVector().GetMemoryTypeIndex()) & memoryTypeBits) != 0)
8920  {
8921  VkResult localRes = m_Pools[poolIndex]->GetBlockVector().CheckCorruption();
8922  switch(localRes)
8923  {
8924  case VK_ERROR_FEATURE_NOT_PRESENT:
8925  break;
8926  case VK_SUCCESS:
8927  finalRes = VK_SUCCESS;
8928  break;
8929  default:
8930  return localRes;
8931  }
8932  }
8933  }
8934  }
8935 
8936  return finalRes;
8937 }
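// Usage sketch (illustrative): validating allocation margins from the public
// API. Requires VMA_DEBUG_DETECT_CORRUPTION (and a nonzero VMA_DEBUG_MARGIN) at
// compile time; otherwise VK_ERROR_FEATURE_NOT_PRESENT is returned, as above.
/*
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
*/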
8938 
8939 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8940 {
8941  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8942  (*pAllocation)->InitLost();
8943 }
8944 
8945 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8946 {
8947  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8948 
8949  VkResult res;
8950  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8951  {
8952  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8953  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8954  {
8955  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8956  if(res == VK_SUCCESS)
8957  {
8958  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8959  }
8960  }
8961  else
8962  {
8963  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8964  }
8965  }
8966  else
8967  {
8968  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8969  }
8970 
8971  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8972  {
8973  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8974  }
8975 
8976  return res;
8977 }
8978 
8979 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8980 {
8981  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8982  {
8983  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8984  }
8985 
8986  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8987 
8988  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8989  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8990  {
8991  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8992  m_HeapSizeLimit[heapIndex] += size;
8993  }
8994 }
8995 
8996 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8997 {
8998  if(hAllocation->CanBecomeLost())
8999  {
9000  return VK_ERROR_MEMORY_MAP_FAILED;
9001  }
9002 
9003  switch(hAllocation->GetType())
9004  {
9005  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
9006  {
9007  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
9008  char *pBytes = VMA_NULL;
9009  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
9010  if(res == VK_SUCCESS)
9011  {
9012  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
9013  hAllocation->BlockAllocMap();
9014  }
9015  return res;
9016  }
9017  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
9018  return hAllocation->DedicatedAllocMap(this, ppData);
9019  default:
9020  VMA_ASSERT(0);
9021  return VK_ERROR_MEMORY_MAP_FAILED;
9022  }
9023 }
9024 
9025 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
9026 {
9027  switch(hAllocation->GetType())
9028  {
9029  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
9030  {
9031  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
9032  hAllocation->BlockAllocUnmap();
9033  pBlock->Unmap(this, 1);
9034  }
9035  break;
9036  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
9037  hAllocation->DedicatedAllocUnmap(this);
9038  break;
9039  default:
9040  VMA_ASSERT(0);
9041  }
9042 }
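// Usage sketch (illustrative): the public map/unmap pair built on the two
// methods above. Mapping is reference-counted per block, so nested maps of
// allocations from the same block are allowed.
/*
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, (size_t)srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/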
9043 
9044 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
9045 {
9046  VkResult res = VK_SUCCESS;
9047  switch(hAllocation->GetType())
9048  {
9049  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
9050  res = GetVulkanFunctions().vkBindBufferMemory(
9051  m_hDevice,
9052  hBuffer,
9053  hAllocation->GetMemory(),
9054  0); //memoryOffset
9055  break;
9056  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
9057  {
9058  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
9059  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
9060  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
9061  break;
9062  }
9063  default:
9064  VMA_ASSERT(0);
9065  }
9066  return res;
9067 }
9068 
9069 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
9070 {
9071  VkResult res = VK_SUCCESS;
9072  switch(hAllocation->GetType())
9073  {
9074  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
9075  res = GetVulkanFunctions().vkBindImageMemory(
9076  m_hDevice,
9077  hImage,
9078  hAllocation->GetMemory(),
9079  0); //memoryOffset
9080  break;
9081  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
9082  {
9083  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
9084  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
9085  res = pBlock->BindImageMemory(this, hAllocation, hImage);
9086  break;
9087  }
9088  default:
9089  VMA_ASSERT(0);
9090  }
9091  return res;
9092 }
9093 
9094 void VmaAllocator_T::FlushOrInvalidateAllocation(
9095  VmaAllocation hAllocation,
9096  VkDeviceSize offset, VkDeviceSize size,
9097  VMA_CACHE_OPERATION op)
9098 {
9099  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
9100  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
9101  {
9102  const VkDeviceSize allocationSize = hAllocation->GetSize();
9103  VMA_ASSERT(offset <= allocationSize);
9104 
9105  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
9106 
9107  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
9108  memRange.memory = hAllocation->GetMemory();
9109 
9110  switch(hAllocation->GetType())
9111  {
9112  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
9113  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
9114  if(size == VK_WHOLE_SIZE)
9115  {
9116  memRange.size = allocationSize - memRange.offset;
9117  }
9118  else
9119  {
9120  VMA_ASSERT(offset + size <= allocationSize);
9121  memRange.size = VMA_MIN(
9122  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
9123  allocationSize - memRange.offset);
9124  }
9125  break;
9126 
9127  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
9128  {
9129  // 1. Still within this allocation.
9130  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
9131  if(size == VK_WHOLE_SIZE)
9132  {
9133  size = allocationSize - offset;
9134  }
9135  else
9136  {
9137  VMA_ASSERT(offset + size <= allocationSize);
9138  }
9139  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
9140 
9141  // 2. Adjust to whole block.
9142  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
9143  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
9144  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_Metadata.GetSize();
9145  memRange.offset += allocationOffset;
9146  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
9147 
9148  break;
9149  }
9150 
9151  default:
9152  VMA_ASSERT(0);
9153  }
9154 
9155  switch(op)
9156  {
9157  case VMA_CACHE_FLUSH:
9158  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
9159  break;
9160  case VMA_CACHE_INVALIDATE:
9161  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
9162  break;
9163  default:
9164  VMA_ASSERT(0);
9165  }
9166  }
9167  // else: Just ignore this call.
9168 }
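// Usage sketch (illustrative): for non-HOST_COHERENT memory types, the public
// wrappers route here. Offsets and sizes are relative to the allocation;
// alignment to nonCoherentAtomSize and clamping to the block happen above.
/*
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);      // after CPU writes
    vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // before CPU reads
*/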
9169 
9170 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
9171 {
9172  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
9173 
9174  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
9175  {
9176  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
9177  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
9178  VMA_ASSERT(pDedicatedAllocations);
9179  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
9180  VMA_ASSERT(success);
9181  }
9182 
9183  VkDeviceMemory hMemory = allocation->GetMemory();
9184 
9185  if(allocation->GetMappedData() != VMA_NULL)
9186  {
9187  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
9188  }
9189 
9190  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
9191 
9192  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
9193 }
9194 
9195 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
9196 {
9197  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
9198  !hAllocation->CanBecomeLost() &&
9199  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
9200  {
9201  void* pData = VMA_NULL;
9202  VkResult res = Map(hAllocation, &pData);
9203  if(res == VK_SUCCESS)
9204  {
9205  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
9206  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
9207  Unmap(hAllocation);
9208  }
9209  else
9210  {
9211  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
9212  }
9213  }
9214 }
9215 
9216 #if VMA_STATS_STRING_ENABLED
9217 
9218 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
9219 {
9220  bool dedicatedAllocationsStarted = false;
9221  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
9222  {
9223  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
9224  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
9225  VMA_ASSERT(pDedicatedAllocVector);
9226  if(pDedicatedAllocVector->empty() == false)
9227  {
9228  if(dedicatedAllocationsStarted == false)
9229  {
9230  dedicatedAllocationsStarted = true;
9231  json.WriteString("DedicatedAllocations");
9232  json.BeginObject();
9233  }
9234 
9235  json.BeginString("Type ");
9236  json.ContinueString(memTypeIndex);
9237  json.EndString();
9238 
9239  json.BeginArray();
9240 
9241  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
9242  {
9243  json.BeginObject(true);
9244  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
9245  hAlloc->PrintParameters(json);
9246  json.EndObject();
9247  }
9248 
9249  json.EndArray();
9250  }
9251  }
9252  if(dedicatedAllocationsStarted)
9253  {
9254  json.EndObject();
9255  }
9256 
9257  {
9258  bool allocationsStarted = false;
9259  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
9260  {
9261  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
9262  {
9263  if(allocationsStarted == false)
9264  {
9265  allocationsStarted = true;
9266  json.WriteString("DefaultPools");
9267  json.BeginObject();
9268  }
9269 
9270  json.BeginString("Type ");
9271  json.ContinueString(memTypeIndex);
9272  json.EndString();
9273 
9274  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
9275  }
9276  }
9277  if(allocationsStarted)
9278  {
9279  json.EndObject();
9280  }
9281  }
9282 
9283  {
9284  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
9285  const size_t poolCount = m_Pools.size();
9286  if(poolCount > 0)
9287  {
9288  json.WriteString("Pools");
9289  json.BeginObject();
9290  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
9291  {
9292  json.BeginString();
9293  json.ContinueString(m_Pools[poolIndex]->GetId());
9294  json.EndString();
9295 
9296  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
9297  }
9298  json.EndObject();
9299  }
9300  }
9301 }
9302 
9303 #endif // #if VMA_STATS_STRING_ENABLED
9304 
9305 static VkResult AllocateMemoryForImage(
9306  VmaAllocator allocator,
9307  VkImage image,
9308  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9309  VmaSuballocationType suballocType,
9310  VmaAllocation* pAllocation)
9311 {
9312  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
9313 
9314  VkMemoryRequirements vkMemReq = {};
9315  bool requiresDedicatedAllocation = false;
9316  bool prefersDedicatedAllocation = false;
9317  allocator->GetImageMemoryRequirements(image, vkMemReq,
9318  requiresDedicatedAllocation, prefersDedicatedAllocation);
9319 
9320  return allocator->AllocateMemory(
9321  vkMemReq,
9322  requiresDedicatedAllocation,
9323  prefersDedicatedAllocation,
9324  VK_NULL_HANDLE, // dedicatedBuffer
9325  image, // dedicatedImage
9326  *pAllocationCreateInfo,
9327  suballocType,
9328  pAllocation);
9329 }
9330 
9331 ////////////////////////////////////////////////////////////////////////////////
9332 // Public interface
9333 
9334 VkResult vmaCreateAllocator(
9335  const VmaAllocatorCreateInfo* pCreateInfo,
9336  VmaAllocator* pAllocator)
9337 {
9338  VMA_ASSERT(pCreateInfo && pAllocator);
9339  VMA_DEBUG_LOG("vmaCreateAllocator");
9340  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
9341  return VK_SUCCESS;
9342 }
9343 
9344 void vmaDestroyAllocator(
9345  VmaAllocator allocator)
9346 {
9347  if(allocator != VK_NULL_HANDLE)
9348  {
9349  VMA_DEBUG_LOG("vmaDestroyAllocator");
9350  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
9351  vma_delete(&allocationCallbacks, allocator);
9352  }
9353 }
9354 
9355 void vmaGetPhysicalDeviceProperties(
9356  VmaAllocator allocator,
9357  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
9358 {
9359  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
9360  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
9361 }
9362 
9363 void vmaGetMemoryProperties(
9364  VmaAllocator allocator,
9365  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
9366 {
9367  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
9368  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
9369 }
9370 
9371 void vmaGetMemoryTypeProperties(
9372  VmaAllocator allocator,
9373  uint32_t memoryTypeIndex,
9374  VkMemoryPropertyFlags* pFlags)
9375 {
9376  VMA_ASSERT(allocator && pFlags);
9377  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
9378  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
9379 }
9380 
9381 void vmaSetCurrentFrameIndex(
9382  VmaAllocator allocator,
9383  uint32_t frameIndex)
9384 {
9385  VMA_ASSERT(allocator);
9386  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
9387 
9388  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9389 
9390  allocator->SetCurrentFrameIndex(frameIndex);
9391 }
9392 
9393 void vmaCalculateStats(
9394  VmaAllocator allocator,
9395  VmaStats* pStats)
9396 {
9397  VMA_ASSERT(allocator && pStats);
9398  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9399  allocator->CalculateStats(pStats);
9400 }
9401 
9402 #if VMA_STATS_STRING_ENABLED
9403 
9404 void vmaBuildStatsString(
9405  VmaAllocator allocator,
9406  char** ppStatsString,
9407  VkBool32 detailedMap)
9408 {
9409  VMA_ASSERT(allocator && ppStatsString);
9410  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9411 
9412  VmaStringBuilder sb(allocator);
9413  {
9414  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
9415  json.BeginObject();
9416 
9417  VmaStats stats;
9418  allocator->CalculateStats(&stats);
9419 
9420  json.WriteString("Total");
9421  VmaPrintStatInfo(json, stats.total);
9422 
9423  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
9424  {
9425  json.BeginString("Heap ");
9426  json.ContinueString(heapIndex);
9427  json.EndString();
9428  json.BeginObject();
9429 
9430  json.WriteString("Size");
9431  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
9432 
9433  json.WriteString("Flags");
9434  json.BeginArray(true);
9435  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
9436  {
9437  json.WriteString("DEVICE_LOCAL");
9438  }
9439  json.EndArray();
9440 
9441  if(stats.memoryHeap[heapIndex].blockCount > 0)
9442  {
9443  json.WriteString("Stats");
9444  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
9445  }
9446 
9447  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
9448  {
9449  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
9450  {
9451  json.BeginString("Type ");
9452  json.ContinueString(typeIndex);
9453  json.EndString();
9454 
9455  json.BeginObject();
9456 
9457  json.WriteString("Flags");
9458  json.BeginArray(true);
9459  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
9460  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
9461  {
9462  json.WriteString("DEVICE_LOCAL");
9463  }
9464  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
9465  {
9466  json.WriteString("HOST_VISIBLE");
9467  }
9468  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
9469  {
9470  json.WriteString("HOST_COHERENT");
9471  }
9472  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
9473  {
9474  json.WriteString("HOST_CACHED");
9475  }
9476  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
9477  {
9478  json.WriteString("LAZILY_ALLOCATED");
9479  }
9480  json.EndArray();
9481 
9482  if(stats.memoryType[typeIndex].blockCount > 0)
9483  {
9484  json.WriteString("Stats");
9485  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
9486  }
9487 
9488  json.EndObject();
9489  }
9490  }
9491 
9492  json.EndObject();
9493  }
9494  if(detailedMap == VK_TRUE)
9495  {
9496  allocator->PrintDetailedMap(json);
9497  }
9498 
9499  json.EndObject();
9500  }
9501 
9502  const size_t len = sb.GetLength();
9503  char* const pChars = vma_new_array(allocator, char, len + 1);
9504  if(len > 0)
9505  {
9506  memcpy(pChars, sb.GetData(), len);
9507  }
9508  pChars[len] = '\0';
9509  *ppStatsString = pChars;
9510 }
9511 
9512 void vmaFreeStatsString(
9513  VmaAllocator allocator,
9514  char* pStatsString)
9515 {
9516  if(pStatsString != VMA_NULL)
9517  {
9518  VMA_ASSERT(allocator);
9519  size_t len = strlen(pStatsString);
9520  vma_delete_array(allocator, pStatsString, len + 1);
9521  }
9522 }
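For reference, a round trip through these two functions (sketch; `allocator` is assumed valid and <stdio.h> included):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString); // free with the same allocator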
9523 
9524 #endif // #if VMA_STATS_STRING_ENABLED
9525 
9526 /*
9527 This function is not protected by any mutex because it just reads immutable data.
9528 */
9529 VkResult vmaFindMemoryTypeIndex(
9530  VmaAllocator allocator,
9531  uint32_t memoryTypeBits,
9532  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9533  uint32_t* pMemoryTypeIndex)
9534 {
9535  VMA_ASSERT(allocator != VK_NULL_HANDLE);
9536  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
9537  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
9538 
9539  if(pAllocationCreateInfo->memoryTypeBits != 0)
9540  {
9541  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
9542  }
9543 
9544  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
9545  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
9546 
9547  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
9548  if(mapped)
9549  {
9550  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
9551  }
9552 
9553  // Convert usage to requiredFlags and preferredFlags.
9554  switch(pAllocationCreateInfo->usage)
9555  {
9556  case VMA_MEMORY_USAGE_UNKNOWN:
9557  break;
9558  case VMA_MEMORY_USAGE_GPU_ONLY:
9559  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
9560  {
9561  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
9562  }
9563  break;
9564  case VMA_MEMORY_USAGE_CPU_ONLY:
9565  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
9566  break;
9567  case VMA_MEMORY_USAGE_CPU_TO_GPU:
9568  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
9569  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
9570  {
9571  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
9572  }
9573  break;
9574  case VMA_MEMORY_USAGE_GPU_TO_CPU:
9575  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
9576  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
9577  break;
9578  default:
9579  break;
9580  }
9581 
9582  *pMemoryTypeIndex = UINT32_MAX;
9583  uint32_t minCost = UINT32_MAX;
9584  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
9585  memTypeIndex < allocator->GetMemoryTypeCount();
9586  ++memTypeIndex, memTypeBit <<= 1)
9587  {
9588  // This memory type is acceptable according to memoryTypeBits bitmask.
9589  if((memTypeBit & memoryTypeBits) != 0)
9590  {
9591  const VkMemoryPropertyFlags currFlags =
9592  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
9593  // This memory type contains requiredFlags.
9594  if((requiredFlags & ~currFlags) == 0)
9595  {
9596  // Calculate cost as number of bits from preferredFlags not present in this memory type.
9597  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
9598  // Remember memory type with lowest cost.
9599  if(currCost < minCost)
9600  {
9601  *pMemoryTypeIndex = memTypeIndex;
9602  if(currCost == 0)
9603  {
9604  return VK_SUCCESS;
9605  }
9606  minCost = currCost;
9607  }
9608  }
9609  }
9610  }
9611  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
9612 }
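A usage sketch: `memReq.memoryTypeBits` would normally come from vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements; the values below are hypothetical.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // On success, memTypeIndex can seed e.g. VmaPoolCreateInfo::memoryTypeIndex.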
9613 
9614 VkResult vmaFindMemoryTypeIndexForBufferInfo(
9615  VmaAllocator allocator,
9616  const VkBufferCreateInfo* pBufferCreateInfo,
9617  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9618  uint32_t* pMemoryTypeIndex)
9619 {
9620  VMA_ASSERT(allocator != VK_NULL_HANDLE);
9621  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
9622  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
9623  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
9624 
9625  const VkDevice hDev = allocator->m_hDevice;
9626  VkBuffer hBuffer = VK_NULL_HANDLE;
9627  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
9628  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
9629  if(res == VK_SUCCESS)
9630  {
9631  VkMemoryRequirements memReq = {};
9632  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
9633  hDev, hBuffer, &memReq);
9634 
9635  res = vmaFindMemoryTypeIndex(
9636  allocator,
9637  memReq.memoryTypeBits,
9638  pAllocationCreateInfo,
9639  pMemoryTypeIndex);
9640 
9641  allocator->GetVulkanFunctions().vkDestroyBuffer(
9642  hDev, hBuffer, allocator->GetAllocationCallbacks());
9643  }
9644  return res;
9645 }
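Since this function creates and destroys a temporary VkBuffer internally, only the two create-infos are needed (sketch with hypothetical values):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);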
9646 
9647 VkResult vmaFindMemoryTypeIndexForImageInfo(
9648  VmaAllocator allocator,
9649  const VkImageCreateInfo* pImageCreateInfo,
9650  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9651  uint32_t* pMemoryTypeIndex)
9652 {
9653  VMA_ASSERT(allocator != VK_NULL_HANDLE);
9654  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
9655  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
9656  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
9657 
9658  const VkDevice hDev = allocator->m_hDevice;
9659  VkImage hImage = VK_NULL_HANDLE;
9660  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
9661  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
9662  if(res == VK_SUCCESS)
9663  {
9664  VkMemoryRequirements memReq = {};
9665  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
9666  hDev, hImage, &memReq);
9667 
9668  res = vmaFindMemoryTypeIndex(
9669  allocator,
9670  memReq.memoryTypeBits,
9671  pAllocationCreateInfo,
9672  pMemoryTypeIndex);
9673 
9674  allocator->GetVulkanFunctions().vkDestroyImage(
9675  hDev, hImage, allocator->GetAllocationCallbacks());
9676  }
9677  return res;
9678 }
9679 
9680 VkResult vmaCreatePool(
9681  VmaAllocator allocator,
9682  const VmaPoolCreateInfo* pCreateInfo,
9683  VmaPool* pPool)
9684 {
9685  VMA_ASSERT(allocator && pCreateInfo && pPool);
9686 
9687  VMA_DEBUG_LOG("vmaCreatePool");
9688 
9689  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9690 
9691  return allocator->CreatePool(pCreateInfo, pPool);
9692 }
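A pool-creation sketch, assuming `memTypeIndex` was found with one of the vmaFindMemoryTypeIndex* helpers above; the sizes are hypothetical:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block
    poolCreateInfo.maxBlockCount = 2;               // cap the pool at two blocks

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Route allocations into it via VmaAllocationCreateInfo::pool = pool;
    // destroy with vmaDestroyPool() after all its allocations are freed.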
9693 
9694 void vmaDestroyPool(
9695  VmaAllocator allocator,
9696  VmaPool pool)
9697 {
9698  VMA_ASSERT(allocator);
9699 
9700  if(pool == VK_NULL_HANDLE)
9701  {
9702  return;
9703  }
9704 
9705  VMA_DEBUG_LOG("vmaDestroyPool");
9706 
9707  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9708 
9709  allocator->DestroyPool(pool);
9710 }
9711 
9712 void vmaGetPoolStats(
9713  VmaAllocator allocator,
9714  VmaPool pool,
9715  VmaPoolStats* pPoolStats)
9716 {
9717  VMA_ASSERT(allocator && pool && pPoolStats);
9718 
9719  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9720 
9721  allocator->GetPoolStats(pool, pPoolStats);
9722 }
9723 
9724 void vmaMakePoolAllocationsLost(
9725  VmaAllocator allocator,
9726  VmaPool pool,
9727  size_t* pLostAllocationCount)
9728 {
9729  VMA_ASSERT(allocator && pool);
9730 
9731  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9732 
9733  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
9734 }
9735 
9736 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
9737 {
9738  VMA_ASSERT(allocator && pool);
9739 
9740  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9741 
9742  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
9743 
9744  return allocator->CheckPoolCorruption(pool);
9745 }
9746 
9747 VkResult vmaAllocateMemory(
9748  VmaAllocator allocator,
9749  const VkMemoryRequirements* pVkMemoryRequirements,
9750  const VmaAllocationCreateInfo* pCreateInfo,
9751  VmaAllocation* pAllocation,
9752  VmaAllocationInfo* pAllocationInfo)
9753 {
9754  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
9755 
9756  VMA_DEBUG_LOG("vmaAllocateMemory");
9757 
9758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9759 
9760  VkResult result = allocator->AllocateMemory(
9761  *pVkMemoryRequirements,
9762  false, // requiresDedicatedAllocation
9763  false, // prefersDedicatedAllocation
9764  VK_NULL_HANDLE, // dedicatedBuffer
9765  VK_NULL_HANDLE, // dedicatedImage
9766  *pCreateInfo,
9767  VMA_SUBALLOCATION_TYPE_UNKNOWN,
9768  pAllocation);
9769 
9770  if(pAllocationInfo && result == VK_SUCCESS)
9771  {
9772  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9773  }
9774 
9775  return result;
9776 }
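A sketch of this low-level path: query requirements yourself, allocate, then bind. `buffer` is assumed to have been created with vkCreateBuffer beforehand.

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &allocation, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, allocation, buffer);
    }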
9777 
9778 VkResult vmaAllocateMemoryForBuffer(
9779  VmaAllocator allocator,
9780  VkBuffer buffer,
9781  const VmaAllocationCreateInfo* pCreateInfo,
9782  VmaAllocation* pAllocation,
9783  VmaAllocationInfo* pAllocationInfo)
9784 {
9785  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9786 
9787  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
9788 
9789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9790 
9791  VkMemoryRequirements vkMemReq = {};
9792  bool requiresDedicatedAllocation = false;
9793  bool prefersDedicatedAllocation = false;
9794  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
9795  requiresDedicatedAllocation,
9796  prefersDedicatedAllocation);
9797 
9798  VkResult result = allocator->AllocateMemory(
9799  vkMemReq,
9800  requiresDedicatedAllocation,
9801  prefersDedicatedAllocation,
9802  buffer, // dedicatedBuffer
9803  VK_NULL_HANDLE, // dedicatedImage
9804  *pCreateInfo,
9805  VMA_SUBALLOCATION_TYPE_BUFFER,
9806  pAllocation);
9807 
9808  if(pAllocationInfo && result == VK_SUCCESS)
9809  {
9810  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9811  }
9812 
9813  return result;
9814 }
9815 
9816 VkResult vmaAllocateMemoryForImage(
9817  VmaAllocator allocator,
9818  VkImage image,
9819  const VmaAllocationCreateInfo* pCreateInfo,
9820  VmaAllocation* pAllocation,
9821  VmaAllocationInfo* pAllocationInfo)
9822 {
9823  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9824 
9825  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
9826 
9827  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9828 
9829  VkResult result = AllocateMemoryForImage(
9830  allocator,
9831  image,
9832  pCreateInfo,
9833  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
9834  pAllocation);
9835 
9836  if(pAllocationInfo && result == VK_SUCCESS)
9837  {
9838  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9839  }
9840 
9841  return result;
9842 }
9843 
9844 void vmaFreeMemory(
9845  VmaAllocator allocator,
9846  VmaAllocation allocation)
9847 {
9848  VMA_ASSERT(allocator);
9849  VMA_DEBUG_LOG("vmaFreeMemory");
9850  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9851  if(allocation != VK_NULL_HANDLE)
9852  {
9853  allocator->FreeMemory(allocation);
9854  }
9855 }
9856 
9857 void vmaGetAllocationInfo(
9858  VmaAllocator allocator,
9859  VmaAllocation allocation,
9860  VmaAllocationInfo* pAllocationInfo)
9861 {
9862  VMA_ASSERT(allocator && allocation && pAllocationInfo);
9863 
9864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9865 
9866  allocator->GetAllocationInfo(allocation, pAllocationInfo);
9867 }
9868 
9869 VkBool32 vmaTouchAllocation(
9870  VmaAllocator allocator,
9871  VmaAllocation allocation)
9872 {
9873  VMA_ASSERT(allocator && allocation);
9874 
9875  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9876 
9877  return allocator->TouchAllocation(allocation);
9878 }
9879 
9880 void vmaSetAllocationUserData(
9881  VmaAllocator allocator,
9882  VmaAllocation allocation,
9883  void* pUserData)
9884 {
9885  VMA_ASSERT(allocator && allocation);
9886 
9887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9888 
9889  allocation->SetUserData(allocator, pUserData);
9890 }
9891 
9892 void vmaCreateLostAllocation(
9893  VmaAllocator allocator,
9894  VmaAllocation* pAllocation)
9895 {
9896  VMA_ASSERT(allocator && pAllocation);
9897 
9898  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
9899 
9900  allocator->CreateLostAllocation(pAllocation);
9901 }
9902 
9903 VkResult vmaMapMemory(
9904  VmaAllocator allocator,
9905  VmaAllocation allocation,
9906  void** ppData)
9907 {
9908  VMA_ASSERT(allocator && allocation && ppData);
9909 
9910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9911 
9912  return allocator->Map(allocation, ppData);
9913 }
9914 
9915 void vmaUnmapMemory(
9916  VmaAllocator allocator,
9917  VmaAllocation allocation)
9918 {
9919  VMA_ASSERT(allocator && allocation);
9920 
9921  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9922 
9923  allocator->Unmap(allocation);
9924 }
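An upload sketch: map, copy, unmap. The allocation is assumed to live in HOST_VISIBLE memory (e.g. created with VMA_MEMORY_USAGE_CPU_ONLY); `srcData` and `srcSize` are hypothetical.

    void* pData;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation); // every Map must be paired with Unmap
    }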
9925 
9926 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
9927 {
9928  VMA_ASSERT(allocator && allocation);
9929 
9930  VMA_DEBUG_LOG("vmaFlushAllocation");
9931 
9932  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9933 
9934  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
9935 }
9936 
9937 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
9938 {
9939  VMA_ASSERT(allocator && allocation);
9940 
9941  VMA_DEBUG_LOG("vmaInvalidateAllocation");
9942 
9943  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9944 
9945  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
9946 }
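If the memory type lacks VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, host writes must be flushed before the GPU reads them and mapped memory invalidated before the host reads GPU writes (sketch; VK_WHOLE_SIZE covers the entire allocation):

    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);      // after CPU writes
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE); // before CPU reads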
9947 
9948 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
9949 {
9950  VMA_ASSERT(allocator);
9951 
9952  VMA_DEBUG_LOG("vmaCheckCorruption");
9953 
9954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9955 
9956  return allocator->CheckCorruption(memoryTypeBits);
9957 }
9958 
9959 VkResult vmaDefragment(
9960  VmaAllocator allocator,
9961  VmaAllocation* pAllocations,
9962  size_t allocationCount,
9963  VkBool32* pAllocationsChanged,
9964  const VmaDefragmentationInfo *pDefragmentationInfo,
9965  VmaDefragmentationStats* pDefragmentationStats)
9966 {
9967  VMA_ASSERT(allocator && pAllocations);
9968 
9969  VMA_DEBUG_LOG("vmaDefragment");
9970 
9971  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9972 
9973  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
9974 }
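A defragmentation sketch over an application-owned array; `allocations` and `kAllocCount` are hypothetical. Any allocation whose flag comes back VK_TRUE was moved, so its buffer or image must be recreated and rebound by the caller.

    enum { kAllocCount = 16 };                      // hypothetical count
    VkBool32 allocationsChanged[kAllocCount] = {};  // one flag per allocation
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, kAllocCount, allocationsChanged,
        VMA_NULL, // default VmaDefragmentationInfo: no move limits
        &stats);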
9975 
9976 VkResult vmaBindBufferMemory(
9977  VmaAllocator allocator,
9978  VmaAllocation allocation,
9979  VkBuffer buffer)
9980 {
9981  VMA_ASSERT(allocator && allocation && buffer);
9982 
9983  VMA_DEBUG_LOG("vmaBindBufferMemory");
9984 
9985  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9986 
9987  return allocator->BindBufferMemory(allocation, buffer);
9988 }
9989 
9990 VkResult vmaBindImageMemory(
9991  VmaAllocator allocator,
9992  VmaAllocation allocation,
9993  VkImage image)
9994 {
9995  VMA_ASSERT(allocator && allocation && image);
9996 
9997  VMA_DEBUG_LOG("vmaBindImageMemory");
9998 
9999  VMA_DEBUG_GLOBAL_MUTEX_LOCK
10000 
10001  return allocator->BindImageMemory(allocation, image);
10002 }
10003 
10004 VkResult vmaCreateBuffer(
10005  VmaAllocator allocator,
10006  const VkBufferCreateInfo* pBufferCreateInfo,
10007  const VmaAllocationCreateInfo* pAllocationCreateInfo,
10008  VkBuffer* pBuffer,
10009  VmaAllocation* pAllocation,
10010  VmaAllocationInfo* pAllocationInfo)
10011 {
10012  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
10013 
10014  VMA_DEBUG_LOG("vmaCreateBuffer");
10015 
10016  VMA_DEBUG_GLOBAL_MUTEX_LOCK
10017 
10018  *pBuffer = VK_NULL_HANDLE;
10019  *pAllocation = VK_NULL_HANDLE;
10020 
10021  // 1. Create VkBuffer.
10022  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
10023  allocator->m_hDevice,
10024  pBufferCreateInfo,
10025  allocator->GetAllocationCallbacks(),
10026  pBuffer);
10027  if(res >= 0)
10028  {
10029  // 2. vkGetBufferMemoryRequirements.
10030  VkMemoryRequirements vkMemReq = {};
10031  bool requiresDedicatedAllocation = false;
10032  bool prefersDedicatedAllocation = false;
10033  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
10034  requiresDedicatedAllocation, prefersDedicatedAllocation);
10035 
10036  // Make sure alignment requirements for specific buffer usages reported
10037  // in Physical Device Properties are included in alignment reported by memory requirements.
10038  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
10039  {
10040  VMA_ASSERT(vkMemReq.alignment %
10041  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
10042  }
10043  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
10044  {
10045  VMA_ASSERT(vkMemReq.alignment %
10046  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
10047  }
10048  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
10049  {
10050  VMA_ASSERT(vkMemReq.alignment %
10051  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
10052  }
10053 
10054  // 3. Allocate memory using allocator.
10055  res = allocator->AllocateMemory(
10056  vkMemReq,
10057  requiresDedicatedAllocation,
10058  prefersDedicatedAllocation,
10059  *pBuffer, // dedicatedBuffer
10060  VK_NULL_HANDLE, // dedicatedImage
10061  *pAllocationCreateInfo,
10062  VMA_SUBALLOCATION_TYPE_BUFFER,
10063  pAllocation);
10064  if(res >= 0)
10065  {
10066  // 4. Bind buffer with memory.
10067  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
10068  if(res >= 0)
10069  {
10070  // All steps succeeded.
10071  #if VMA_STATS_STRING_ENABLED
10072  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
10073  #endif
10074  if(pAllocationInfo != VMA_NULL)
10075  {
10076  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
10077  }
10078  return VK_SUCCESS;
10079  }
10080  allocator->FreeMemory(*pAllocation);
10081  *pAllocation = VK_NULL_HANDLE;
10082  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
10083  *pBuffer = VK_NULL_HANDLE;
10084  return res;
10085  }
10086  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
10087  *pBuffer = VK_NULL_HANDLE;
10088  return res;
10089  }
10090  return res;
10091 }
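The typical single-call path that wraps the numbered steps above (sketch with hypothetical sizes):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
    // Pairs with vmaDestroyBuffer(allocator, buffer, allocation);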
10092 
10093 void vmaDestroyBuffer(
10094  VmaAllocator allocator,
10095  VkBuffer buffer,
10096  VmaAllocation allocation)
10097 {
10098  VMA_ASSERT(allocator);
10099  VMA_DEBUG_LOG("vmaDestroyBuffer");
10100  VMA_DEBUG_GLOBAL_MUTEX_LOCK
10101  if(buffer != VK_NULL_HANDLE)
10102  {
10103  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
10104  }
10105  if(allocation != VK_NULL_HANDLE)
10106  {
10107  allocator->FreeMemory(allocation);
10108  }
10109 }
10110 
10111 VkResult vmaCreateImage(
10112  VmaAllocator allocator,
10113  const VkImageCreateInfo* pImageCreateInfo,
10114  const VmaAllocationCreateInfo* pAllocationCreateInfo,
10115  VkImage* pImage,
10116  VmaAllocation* pAllocation,
10117  VmaAllocationInfo* pAllocationInfo)
10118 {
10119  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
10120 
10121  VMA_DEBUG_LOG("vmaCreateImage");
10122 
10123  VMA_DEBUG_GLOBAL_MUTEX_LOCK
10124 
10125  *pImage = VK_NULL_HANDLE;
10126  *pAllocation = VK_NULL_HANDLE;
10127 
10128  // 1. Create VkImage.
10129  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
10130  allocator->m_hDevice,
10131  pImageCreateInfo,
10132  allocator->GetAllocationCallbacks(),
10133  pImage);
10134  if(res >= 0)
10135  {
10136  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
10137  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
10138  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
10139 
10140  // 2. Allocate memory using allocator.
10141  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
10142  if(res >= 0)
10143  {
10144  // 3. Bind image with memory.
10145  res = allocator->BindImageMemory(*pAllocation, *pImage);
10146  if(res >= 0)
10147  {
10148  // All steps succeeded.
10149  #if VMA_STATS_STRING_ENABLED
10150  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
10151  #endif
10152  if(pAllocationInfo != VMA_NULL)
10153  {
10154  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
10155  }
10156  return VK_SUCCESS;
10157  }
10158  allocator->FreeMemory(*pAllocation);
10159  *pAllocation = VK_NULL_HANDLE;
10160  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
10161  *pImage = VK_NULL_HANDLE;
10162  return res;
10163  }
10164  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
10165  *pImage = VK_NULL_HANDLE;
10166  return res;
10167  }
10168  return res;
10169 }
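The image analogue (sketch; format and extent are hypothetical). Note that, as seen above, the tiling selects the suballocation type:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent.width = 512;
    imgCreateInfo.extent.height = 512;
    imgCreateInfo.extent.depth = 1;
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(
        allocator, &imgCreateInfo, &allocCreateInfo, &image, &allocation, VMA_NULL);
    // Pairs with vmaDestroyImage(allocator, image, allocation);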
10170 
10171 void vmaDestroyImage(
10172  VmaAllocator allocator,
10173  VkImage image,
10174  VmaAllocation allocation)
10175 {
10176  VMA_ASSERT(allocator);
10177  VMA_DEBUG_LOG("vmaDestroyImage");
10178  VMA_DEBUG_GLOBAL_MUTEX_LOCK
10179  if(image != VK_NULL_HANDLE)
10180  {
10181  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
10182  }
10183  if(allocation != VK_NULL_HANDLE)
10184  {
10185  allocator->FreeMemory(allocation);
10186  }
10187 }
10188 
10189 #endif // #ifdef VMA_IMPLEMENTATION