Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1614 /*
1615 Define this macro to 0/1 to disable/enable support for recording functionality,
1616 available through VmaAllocatorCreateInfo::pRecordSettings.
1617 */
1618 #ifndef VMA_RECORDING_ENABLED
1619  #ifdef _WIN32
1620  #define VMA_RECORDING_ENABLED 1
1621  #else
1622  #define VMA_RECORDING_ENABLED 0
1623  #endif
1624 #endif
1625 
1626 #ifndef NOMINMAX
1627  #define NOMINMAX // For windows.h
1628 #endif
1629 
1630 #ifndef VULKAN_H_
1631  #include <vulkan/vulkan.h>
1632 #endif
1633 
1634 #if VMA_RECORDING_ENABLED
1635  #include <windows.h>
1636 #endif
1637 
1638 #if !defined(VMA_DEDICATED_ALLOCATION)
1639  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1640  #define VMA_DEDICATED_ALLOCATION 1
1641  #else
1642  #define VMA_DEDICATED_ALLOCATION 0
1643  #endif
1644 #endif
1645 
1655 VK_DEFINE_HANDLE(VmaAllocator)
1656 
1657 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1659  VmaAllocator allocator,
1660  uint32_t memoryType,
1661  VkDeviceMemory memory,
1662  VkDeviceSize size);
1664 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1665  VmaAllocator allocator,
1666  uint32_t memoryType,
1667  VkDeviceMemory memory,
1668  VkDeviceSize size);
1669 
1683 
1713 
1716 typedef VkFlags VmaAllocatorCreateFlags;
1717 
1722 typedef struct VmaVulkanFunctions {
1723  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1724  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1725  PFN_vkAllocateMemory vkAllocateMemory;
1726  PFN_vkFreeMemory vkFreeMemory;
1727  PFN_vkMapMemory vkMapMemory;
1728  PFN_vkUnmapMemory vkUnmapMemory;
1729  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1730  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1731  PFN_vkBindBufferMemory vkBindBufferMemory;
1732  PFN_vkBindImageMemory vkBindImageMemory;
1733  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1734  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1735  PFN_vkCreateBuffer vkCreateBuffer;
1736  PFN_vkDestroyBuffer vkDestroyBuffer;
1737  PFN_vkCreateImage vkCreateImage;
1738  PFN_vkDestroyImage vkDestroyImage;
1739  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1740 #if VMA_DEDICATED_ALLOCATION
1741  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1742  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1743 #endif
1744 } VmaVulkanFunctions;
1745 
1747 typedef enum VmaRecordFlagBits {
1753 } VmaRecordFlagBits;
1754 
1757 typedef VkFlags VmaRecordFlags;
1758 
1760 typedef struct VmaRecordSettings
1761 {
1771  const char* pFilePath;
1772 } VmaRecordSettings;
1773 
1775 typedef struct VmaAllocatorCreateInfo
1776 {
1780 
1781  VkPhysicalDevice physicalDevice;
1783 
1784  VkDevice device;
1786 
1789 
1790  const VkAllocationCallbacks* pAllocationCallbacks;
1792 
1832  const VkDeviceSize* pHeapSizeLimit;
1852 } VmaAllocatorCreateInfo;
1853 
1855 VkResult vmaCreateAllocator(
1856  const VmaAllocatorCreateInfo* pCreateInfo,
1857  VmaAllocator* pAllocator);
1858 
1860 void vmaDestroyAllocator(
1861  VmaAllocator allocator);
1862 
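/*
Example (editor's sketch, not part of the original header): minimal allocator
setup. `physicalDevice` and `device` are assumed to have been created by the
application beforehand.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/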
1867 void vmaGetPhysicalDeviceProperties(
1868  VmaAllocator allocator,
1869  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1870 
1875 void vmaGetMemoryProperties(
1876  VmaAllocator allocator,
1877  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1878 
1885 void vmaGetMemoryTypeProperties(
1886  VmaAllocator allocator,
1887  uint32_t memoryTypeIndex,
1888  VkMemoryPropertyFlags* pFlags);
1889 
1898 void vmaSetCurrentFrameIndex(
1899  VmaAllocator allocator,
1900  uint32_t frameIndex);
1901 
1904 typedef struct VmaStatInfo
1905 {
1907  uint32_t blockCount;
1913  VkDeviceSize usedBytes;
1915  VkDeviceSize unusedBytes;
1918 } VmaStatInfo;
1919 
1921 typedef struct VmaStats
1922 {
1923  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1924  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1926 } VmaStats;
1927 
1929 void vmaCalculateStats(
1930  VmaAllocator allocator,
1931  VmaStats* pStats);
1932 
1933 #define VMA_STATS_STRING_ENABLED 1
1934 
1935 #if VMA_STATS_STRING_ENABLED
1936 
1938 
1940 void vmaBuildStatsString(
1941  VmaAllocator allocator,
1942  char** ppStatsString,
1943  VkBool32 detailedMap);
1944 
1945 void vmaFreeStatsString(
1946  VmaAllocator allocator,
1947  char* pStatsString);
1948 
1949 #endif // #if VMA_STATS_STRING_ENABLED
1950 
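/*
Example (editor's sketch): dumping global statistics as a JSON-formatted string
when VMA_STATS_STRING_ENABLED is 1. The returned string must be freed with
vmaFreeStatsString.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // Write statsString to a log or file...
    vmaFreeStatsString(allocator, statsString);
*/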
1959 VK_DEFINE_HANDLE(VmaPool)
1960 
1961 typedef enum VmaMemoryUsage
1962 {
2011 } VmaMemoryUsage;
2012 
2027 
2082 
2098 
2108 
2115 
2119 
2120 typedef struct VmaAllocationCreateInfo
2121 {
2134  VkMemoryPropertyFlags requiredFlags;
2139  VkMemoryPropertyFlags preferredFlags;
2147  uint32_t memoryTypeBits;
2160  void* pUserData;
2161 } VmaAllocationCreateInfo;
2162 
2179 VkResult vmaFindMemoryTypeIndex(
2180  VmaAllocator allocator,
2181  uint32_t memoryTypeBits,
2182  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2183  uint32_t* pMemoryTypeIndex);
2184 
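/*
Example (editor's sketch): finding a memory type index for a staging buffer.
VMA_MEMORY_USAGE_CPU_ONLY is one of the VmaMemoryUsage values elided from this
listing; `memReq` is assumed to come from vkGetBufferMemoryRequirements.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/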
2197 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2198  VmaAllocator allocator,
2199  const VkBufferCreateInfo* pBufferCreateInfo,
2200  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2201  uint32_t* pMemoryTypeIndex);
2202 
2215 VkResult vmaFindMemoryTypeIndexForImageInfo(
2216  VmaAllocator allocator,
2217  const VkImageCreateInfo* pImageCreateInfo,
2218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2219  uint32_t* pMemoryTypeIndex);
2220 
2241 
2258 
2269 
2275 
2278 typedef VkFlags VmaPoolCreateFlags;
2279 
2282 typedef struct VmaPoolCreateInfo {
2297  VkDeviceSize blockSize;
2325 } VmaPoolCreateInfo;
2326 
2329 typedef struct VmaPoolStats {
2332  VkDeviceSize size;
2335  VkDeviceSize unusedSize;
2348  VkDeviceSize unusedRangeSizeMax;
2351  size_t blockCount;
2352 } VmaPoolStats;
2353 
2360 VkResult vmaCreatePool(
2361  VmaAllocator allocator,
2362  const VmaPoolCreateInfo* pCreateInfo,
2363  VmaPool* pPool);
2364 
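/*
Example (editor's sketch): creating a custom pool. `memoryTypeIndex` is a
VmaPoolCreateInfo member elided from this listing; the remaining members keep
their zero-initialized defaults.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // E.g. from vmaFindMemoryTypeIndex.
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ...
    vmaDestroyPool(allocator, pool);
*/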
2367 void vmaDestroyPool(
2368  VmaAllocator allocator,
2369  VmaPool pool);
2370 
2377 void vmaGetPoolStats(
2378  VmaAllocator allocator,
2379  VmaPool pool,
2380  VmaPoolStats* pPoolStats);
2381 
2388 void vmaMakePoolAllocationsLost(
2389  VmaAllocator allocator,
2390  VmaPool pool,
2391  size_t* pLostAllocationCount);
2392 
2407 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2408 
2433 VK_DEFINE_HANDLE(VmaAllocation)
2434 
2435 
2437 typedef struct VmaAllocationInfo {
2442  uint32_t memoryType;
2451  VkDeviceMemory deviceMemory;
2456  VkDeviceSize offset;
2461  VkDeviceSize size;
2475  void* pUserData;
2476 } VmaAllocationInfo;
2477 
2488 VkResult vmaAllocateMemory(
2489  VmaAllocator allocator,
2490  const VkMemoryRequirements* pVkMemoryRequirements,
2491  const VmaAllocationCreateInfo* pCreateInfo,
2492  VmaAllocation* pAllocation,
2493  VmaAllocationInfo* pAllocationInfo);
2494 
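/*
Example (editor's sketch): allocating memory for requirements queried manually
and binding it to a buffer created separately with vkCreateBuffer.
VMA_MEMORY_USAGE_GPU_ONLY is one of the VmaMemoryUsage values elided from this
listing.

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, nullptr);
    if(res == VK_SUCCESS)
        res = vmaBindBufferMemory(allocator, allocation, buffer);
*/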
2514 VkResult vmaAllocateMemoryPages(
2515  VmaAllocator allocator,
2516  const VkMemoryRequirements* pVkMemoryRequirements,
2517  const VmaAllocationCreateInfo* pCreateInfo,
2518  size_t allocationCount,
2519  VmaAllocation* pAllocations,
2520  VmaAllocationInfo* pAllocationInfo);
2521 
2528 VkResult vmaAllocateMemoryForBuffer(
2529  VmaAllocator allocator,
2530  VkBuffer buffer,
2531  const VmaAllocationCreateInfo* pCreateInfo,
2532  VmaAllocation* pAllocation,
2533  VmaAllocationInfo* pAllocationInfo);
2534 
2536 VkResult vmaAllocateMemoryForImage(
2537  VmaAllocator allocator,
2538  VkImage image,
2539  const VmaAllocationCreateInfo* pCreateInfo,
2540  VmaAllocation* pAllocation,
2541  VmaAllocationInfo* pAllocationInfo);
2542 
2547 void vmaFreeMemory(
2548  VmaAllocator allocator,
2549  VmaAllocation allocation);
2550 
2561 void vmaFreeMemoryPages(
2562  VmaAllocator allocator,
2563  size_t allocationCount,
2564  VmaAllocation* pAllocations);
2565 
2586 VkResult vmaResizeAllocation(
2587  VmaAllocator allocator,
2588  VmaAllocation allocation,
2589  VkDeviceSize newSize);
2590 
2607 void vmaGetAllocationInfo(
2608  VmaAllocator allocator,
2609  VmaAllocation allocation,
2610  VmaAllocationInfo* pAllocationInfo);
2611 
2626 VkBool32 vmaTouchAllocation(
2627  VmaAllocator allocator,
2628  VmaAllocation allocation);
2629 
2643 void vmaSetAllocationUserData(
2644  VmaAllocator allocator,
2645  VmaAllocation allocation,
2646  void* pUserData);
2647 
2658 void vmaCreateLostAllocation(
2659  VmaAllocator allocator,
2660  VmaAllocation* pAllocation);
2661 
2696 VkResult vmaMapMemory(
2697  VmaAllocator allocator,
2698  VmaAllocation allocation,
2699  void** ppData);
2700 
2705 void vmaUnmapMemory(
2706  VmaAllocator allocator,
2707  VmaAllocation allocation);
2708 
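/*
Example (editor's sketch): copying data into a host-visible allocation.
Mapping is reference-counted internally, so each vmaMapMemory must be balanced
by a vmaUnmapMemory.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/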
2721 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2722 
2735 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2736 
2753 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2754 
2761 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2762 
2763 typedef enum VmaDefragmentationFlagBits {
2766 } VmaDefragmentationFlagBits;
2767 typedef VkFlags VmaDefragmentationFlags;
2768 
2773 typedef struct VmaDefragmentationInfo2 {
2797  uint32_t poolCount;
2818  VkDeviceSize maxCpuBytesToMove;
2828  VkDeviceSize maxGpuBytesToMove;
2842  VkCommandBuffer commandBuffer;
2843 } VmaDefragmentationInfo2;
2844 
2849 typedef struct VmaDefragmentationInfo {
2854  VkDeviceSize maxBytesToMove;
2860 } VmaDefragmentationInfo;
2861 
2863 typedef struct VmaDefragmentationStats {
2865  VkDeviceSize bytesMoved;
2867  VkDeviceSize bytesFreed;
2872 } VmaDefragmentationStats;
2873 
2900 VkResult vmaDefragmentationBegin(
2901  VmaAllocator allocator,
2902  const VmaDefragmentationInfo2* pInfo,
2903  VmaDefragmentationStats* pStats,
2904  VmaDefragmentationContext *pContext);
2905 
2911 VkResult vmaDefragmentationEnd(
2912  VmaAllocator allocator,
2913  VmaDefragmentationContext context);
2914 
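/*
Example (editor's sketch): CPU-side defragmentation of an array of allocations
that are known not to be in use by the GPU. `allocationCount`, `pAllocations`,
and `maxCpuAllocationsToMove` are VmaDefragmentationInfo2 members elided from
this listing.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/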
2955 VkResult vmaDefragment(
2956  VmaAllocator allocator,
2957  VmaAllocation* pAllocations,
2958  size_t allocationCount,
2959  VkBool32* pAllocationsChanged,
2960  const VmaDefragmentationInfo *pDefragmentationInfo,
2961  VmaDefragmentationStats* pDefragmentationStats);
2962 
2975 VkResult vmaBindBufferMemory(
2976  VmaAllocator allocator,
2977  VmaAllocation allocation,
2978  VkBuffer buffer);
2979 
2992 VkResult vmaBindImageMemory(
2993  VmaAllocator allocator,
2994  VmaAllocation allocation,
2995  VkImage image);
2996 
3023 VkResult vmaCreateBuffer(
3024  VmaAllocator allocator,
3025  const VkBufferCreateInfo* pBufferCreateInfo,
3026  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3027  VkBuffer* pBuffer,
3028  VmaAllocation* pAllocation,
3029  VmaAllocationInfo* pAllocationInfo);
3030 
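/*
Example (editor's sketch): the typical one-call path that creates a buffer,
allocates memory for it, and binds them together.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/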
3042 void vmaDestroyBuffer(
3043  VmaAllocator allocator,
3044  VkBuffer buffer,
3045  VmaAllocation allocation);
3046 
3048 VkResult vmaCreateImage(
3049  VmaAllocator allocator,
3050  const VkImageCreateInfo* pImageCreateInfo,
3051  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3052  VkImage* pImage,
3053  VmaAllocation* pAllocation,
3054  VmaAllocationInfo* pAllocationInfo);
3055 
3067 void vmaDestroyImage(
3068  VmaAllocator allocator,
3069  VkImage image,
3070  VmaAllocation allocation);
3071 
3072 #ifdef __cplusplus
3073 }
3074 #endif
3075 
3076 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3077 
3078 // For Visual Studio IntelliSense.
3079 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3080 #define VMA_IMPLEMENTATION
3081 #endif
3082 
3083 #ifdef VMA_IMPLEMENTATION
3084 #undef VMA_IMPLEMENTATION
3085 
3086 #include <cstdint>
3087 #include <cstdlib>
3088 #include <cstring>
3089 
3090 /*******************************************************************************
3091 CONFIGURATION SECTION
3092 
3093 Define some of these macros before each #include of this header or change them
3094 here if you need other than the default behavior, depending on your environment.
3095 */
3096 
3097 /*
3098 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3099 internally, like:
3100 
3101  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3102 
3103 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3104 VmaAllocatorCreateInfo::pVulkanFunctions.
3105 */
3106 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3107 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3108 #endif
3109 
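/*
Example (editor's sketch): providing your own function pointers when
VMA_STATIC_VULKAN_FUNCTIONS is defined to 0, e.g. when Vulkan prototypes are
not linked. `pVulkanFunctions` is a VmaAllocatorCreateInfo member elided from
this listing.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    // ... fill in the remaining members the same way ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/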
3110 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3111 //#define VMA_USE_STL_CONTAINERS 1
3112 
3113 /* Set this macro to 1 to make the library include and use STL containers:
3114 std::pair, std::vector, std::list, std::unordered_map.
3115 
3116 Set it to 0 or leave it undefined to make the library use its own
3117 implementations of these containers.
3118 */
3119 #if VMA_USE_STL_CONTAINERS
3120  #define VMA_USE_STL_VECTOR 1
3121  #define VMA_USE_STL_UNORDERED_MAP 1
3122  #define VMA_USE_STL_LIST 1
3123 #endif
3124 
3125 #ifndef VMA_USE_STL_SHARED_MUTEX
3126  // Minimum Visual Studio 2015 Update 2
3127  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3128  #define VMA_USE_STL_SHARED_MUTEX 1
3129  #endif
3130 #endif
3131 
3132 #if VMA_USE_STL_VECTOR
3133  #include <vector>
3134 #endif
3135 
3136 #if VMA_USE_STL_UNORDERED_MAP
3137  #include <unordered_map>
3138 #endif
3139 
3140 #if VMA_USE_STL_LIST
3141  #include <list>
3142 #endif
3143 
3144 /*
3145 Following headers are used in this CONFIGURATION section only, so feel free to
3146 remove them if not needed.
3147 */
3148 #include <cassert> // for assert
3149 #include <algorithm> // for min, max
3150 #include <mutex>
3151 #include <atomic> // for std::atomic
3152 
3153 #ifndef VMA_NULL
3154  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3155  #define VMA_NULL nullptr
3156 #endif
3157 
3158 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3159 #include <cstdlib>
3160 void *aligned_alloc(size_t alignment, size_t size)
3161 {
3162  // alignment must be >= sizeof(void*)
3163  if(alignment < sizeof(void*))
3164  {
3165  alignment = sizeof(void*);
3166  }
3167 
3168  return memalign(alignment, size);
3169 }
3170 #elif defined(__APPLE__) || defined(__ANDROID__)
3171 #include <cstdlib>
3172 void *aligned_alloc(size_t alignment, size_t size)
3173 {
3174  // alignment must be >= sizeof(void*)
3175  if(alignment < sizeof(void*))
3176  {
3177  alignment = sizeof(void*);
3178  }
3179 
3180  void *pointer;
3181  if(posix_memalign(&pointer, alignment, size) == 0)
3182  return pointer;
3183  return VMA_NULL;
3184 }
3185 #endif
3186 
3187 // If your compiler is not compatible with C++11 and the definition of the
3188 // aligned_alloc() function is missing, uncommenting the following line may help:
3189 
3190 //#include <malloc.h>
3191 
3192 // Normal assert to check for programmer's errors, especially in Debug configuration.
3193 #ifndef VMA_ASSERT
3194  #ifdef _DEBUG
3195  #define VMA_ASSERT(expr) assert(expr)
3196  #else
3197  #define VMA_ASSERT(expr)
3198  #endif
3199 #endif
3200 
3201 // Assert that is called very often, e.g. inside data structures such as operator[].
3202 // Making it non-empty can slow the program down.
3203 #ifndef VMA_HEAVY_ASSERT
3204  #ifdef _DEBUG
3205  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3206  #else
3207  #define VMA_HEAVY_ASSERT(expr)
3208  #endif
3209 #endif
3210 
3211 #ifndef VMA_ALIGN_OF
3212  #define VMA_ALIGN_OF(type) (__alignof(type))
3213 #endif
3214 
3215 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3216  #if defined(_WIN32)
3217  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3218  #else
3219  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3220  #endif
3221 #endif
3222 
3223 #ifndef VMA_SYSTEM_FREE
3224  #if defined(_WIN32)
3225  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3226  #else
3227  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3228  #endif
3229 #endif
3230 
3231 #ifndef VMA_MIN
3232  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3233 #endif
3234 
3235 #ifndef VMA_MAX
3236  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3237 #endif
3238 
3239 #ifndef VMA_SWAP
3240  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3241 #endif
3242 
3243 #ifndef VMA_SORT
3244  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3245 #endif
3246 
3247 #ifndef VMA_DEBUG_LOG
3248  #define VMA_DEBUG_LOG(format, ...)
3249  /*
3250  #define VMA_DEBUG_LOG(format, ...) do { \
3251  printf(format, __VA_ARGS__); \
3252  printf("\n"); \
3253  } while(false)
3254  */
3255 #endif
3256 
3257 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3258 #if VMA_STATS_STRING_ENABLED
3259  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3260  {
3261  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3262  }
3263  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3264  {
3265  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3266  }
3267  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3268  {
3269  snprintf(outStr, strLen, "%p", ptr);
3270  }
3271 #endif
3272 
3273 #ifndef VMA_MUTEX
3274  class VmaMutex
3275  {
3276  public:
3277  void Lock() { m_Mutex.lock(); }
3278  void Unlock() { m_Mutex.unlock(); }
3279  private:
3280  std::mutex m_Mutex;
3281  };
3282  #define VMA_MUTEX VmaMutex
3283 #endif
3284 
3285 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3286 #ifndef VMA_RW_MUTEX
3287  #if VMA_USE_STL_SHARED_MUTEX
3288  // Use std::shared_mutex from C++17.
3289  #include <shared_mutex>
3290  class VmaRWMutex
3291  {
3292  public:
3293  void LockRead() { m_Mutex.lock_shared(); }
3294  void UnlockRead() { m_Mutex.unlock_shared(); }
3295  void LockWrite() { m_Mutex.lock(); }
3296  void UnlockWrite() { m_Mutex.unlock(); }
3297  private:
3298  std::shared_mutex m_Mutex;
3299  };
3300  #define VMA_RW_MUTEX VmaRWMutex
3301  #elif defined(_WIN32)
3302  // Use SRWLOCK from WinAPI.
3303  class VmaRWMutex
3304  {
3305  public:
3306  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3307  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3308  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3309  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3310  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3311  private:
3312  SRWLOCK m_Lock;
3313  };
3314  #define VMA_RW_MUTEX VmaRWMutex
3315  #else
3316  // Less efficient fallback: Use normal mutex.
3317  class VmaRWMutex
3318  {
3319  public:
3320  void LockRead() { m_Mutex.Lock(); }
3321  void UnlockRead() { m_Mutex.Unlock(); }
3322  void LockWrite() { m_Mutex.Lock(); }
3323  void UnlockWrite() { m_Mutex.Unlock(); }
3324  private:
3325  VMA_MUTEX m_Mutex;
3326  };
3327  #define VMA_RW_MUTEX VmaRWMutex
3328  #endif // #if VMA_USE_STL_SHARED_MUTEX
3329 #endif // #ifndef VMA_RW_MUTEX
3330 
3331 /*
3332 If providing your own implementation, you need to implement a subset of std::atomic:
3333 
3334 - Constructor(uint32_t desired)
3335 - uint32_t load() const
3336 - void store(uint32_t desired)
3337 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3338 */
3339 #ifndef VMA_ATOMIC_UINT32
3340  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3341 #endif
3342 
3343 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3344 
3348  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3349 #endif
3350 
3351 #ifndef VMA_DEBUG_ALIGNMENT
3352 
3356  #define VMA_DEBUG_ALIGNMENT (1)
3357 #endif
3358 
3359 #ifndef VMA_DEBUG_MARGIN
3360 
3364  #define VMA_DEBUG_MARGIN (0)
3365 #endif
3366 
3367 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3368 
3372  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3373 #endif
3374 
3375 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3376 
3381  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3382 #endif
3383 
3384 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3385 
3389  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3390 #endif
3391 
3392 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3393 
3397  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3398 #endif
3399 
3400 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3401  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3403 #endif
3404 
3405 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3406  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3408 #endif
3409 
3410 #ifndef VMA_CLASS_NO_COPY
3411  #define VMA_CLASS_NO_COPY(className) \
3412  private: \
3413  className(const className&) = delete; \
3414  className& operator=(const className&) = delete;
3415 #endif
3416 
3417 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3418 
3419 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3420 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3421 
3422 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3423 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3424 
3425 /*******************************************************************************
3426 END OF CONFIGURATION
3427 */
3428 
3429 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3430 
3431 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3432  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3433 
3434 // Returns number of bits set to 1 in (v).
3435 static inline uint32_t VmaCountBitsSet(uint32_t v)
3436 {
3437  uint32_t c = v - ((v >> 1) & 0x55555555);
3438  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3439  c = ((c >> 4) + c) & 0x0F0F0F0F;
3440  c = ((c >> 8) + c) & 0x00FF00FF;
3441  c = ((c >> 16) + c) & 0x0000FFFF;
3442  return c;
3443 }
3444 
3445 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3446 // Use types like uint32_t, uint64_t as T.
3447 template <typename T>
3448 static inline T VmaAlignUp(T val, T align)
3449 {
3450  return (val + align - 1) / align * align;
3451 }
3452 // Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3453 // Use types like uint32_t, uint64_t as T.
3454 template <typename T>
3455 static inline T VmaAlignDown(T val, T align)
3456 {
3457  return val / align * align;
3458 }
3459 
3460 // Division with mathematical rounding to nearest number.
3461 template <typename T>
3462 static inline T VmaRoundDiv(T x, T y)
3463 {
3464  return (x + (y / (T)2)) / y;
3465 }
3466 
3467 /*
3468 Returns true if given number is a power of two.
3469 T must be an unsigned integer, or a signed integer that is always nonnegative.
3470 Returns true for 0.
3471 */
3472 template <typename T>
3473 inline bool VmaIsPow2(T x)
3474 {
3475  return (x & (x-1)) == 0;
3476 }
3477 
3478 // Returns smallest power of 2 greater than or equal to v.
3479 static inline uint32_t VmaNextPow2(uint32_t v)
3480 {
3481  v--;
3482  v |= v >> 1;
3483  v |= v >> 2;
3484  v |= v >> 4;
3485  v |= v >> 8;
3486  v |= v >> 16;
3487  v++;
3488  return v;
3489 }
3490 static inline uint64_t VmaNextPow2(uint64_t v)
3491 {
3492  v--;
3493  v |= v >> 1;
3494  v |= v >> 2;
3495  v |= v >> 4;
3496  v |= v >> 8;
3497  v |= v >> 16;
3498  v |= v >> 32;
3499  v++;
3500  return v;
3501 }
3502 
3503 // Returns largest power of 2 less than or equal to v.
3504 static inline uint32_t VmaPrevPow2(uint32_t v)
3505 {
3506  v |= v >> 1;
3507  v |= v >> 2;
3508  v |= v >> 4;
3509  v |= v >> 8;
3510  v |= v >> 16;
3511  v = v ^ (v >> 1);
3512  return v;
3513 }
3514 static inline uint64_t VmaPrevPow2(uint64_t v)
3515 {
3516  v |= v >> 1;
3517  v |= v >> 2;
3518  v |= v >> 4;
3519  v |= v >> 8;
3520  v |= v >> 16;
3521  v |= v >> 32;
3522  v = v ^ (v >> 1);
3523  return v;
3524 }
3525 
3526 static inline bool VmaStrIsEmpty(const char* pStr)
3527 {
3528  return pStr == VMA_NULL || *pStr == '\0';
3529 }
3530 
3531 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3532 {
3533  switch(algorithm)
3534  {
3536  return "Linear";
3538  return "Buddy";
3539  case 0:
3540  return "Default";
3541  default:
3542  VMA_ASSERT(0);
3543  return "";
3544  }
3545 }
3546 
3547 #ifndef VMA_SORT
3548 
3549 template<typename Iterator, typename Compare>
3550 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3551 {
3552  Iterator centerValue = end; --centerValue;
3553  Iterator insertIndex = beg;
3554  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3555  {
3556  if(cmp(*memTypeIndex, *centerValue))
3557  {
3558  if(insertIndex != memTypeIndex)
3559  {
3560  VMA_SWAP(*memTypeIndex, *insertIndex);
3561  }
3562  ++insertIndex;
3563  }
3564  }
3565  if(insertIndex != centerValue)
3566  {
3567  VMA_SWAP(*insertIndex, *centerValue);
3568  }
3569  return insertIndex;
3570 }
3571 
3572 template<typename Iterator, typename Compare>
3573 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3574 {
3575  if(beg < end)
3576  {
3577  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3578  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3579  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3580  }
3581 }
3582 
3583 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3584 
3585 #endif // #ifndef VMA_SORT
3586 
3587 /*
3588 Returns true if two memory blocks occupy overlapping pages.
3589 ResourceA must be at a lower memory offset than ResourceB.
3590 
3591 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3592 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3593 */
3594 static inline bool VmaBlocksOnSamePage(
3595  VkDeviceSize resourceAOffset,
3596  VkDeviceSize resourceASize,
3597  VkDeviceSize resourceBOffset,
3598  VkDeviceSize pageSize)
3599 {
3600  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3601  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3602  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3603  VkDeviceSize resourceBStart = resourceBOffset;
3604  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3605  return resourceAEndPage == resourceBStartPage;
3606 }
3607 
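/*
Worked example (editor's note): with pageSize = 4096, a resource at offset 0
and size 4000 ends on page 0. A second resource starting at offset 4096 starts
on page 1, so the function returns false; if it instead started at offset 4000,
both would touch page 0 and the function would return true.
*/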
3608 enum VmaSuballocationType
3609 {
3610  VMA_SUBALLOCATION_TYPE_FREE = 0,
3611  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3612  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3613  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3614  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3615  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3616  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3617 };
3618 
3619 /*
3620 Returns true if given suballocation types could conflict and must respect
3621 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3622 or linear image and another one is optimal image. If type is unknown, behave
3623 conservatively.
3624 */
3625 static inline bool VmaIsBufferImageGranularityConflict(
3626  VmaSuballocationType suballocType1,
3627  VmaSuballocationType suballocType2)
3628 {
3629  if(suballocType1 > suballocType2)
3630  {
3631  VMA_SWAP(suballocType1, suballocType2);
3632  }
3633 
3634  switch(suballocType1)
3635  {
3636  case VMA_SUBALLOCATION_TYPE_FREE:
3637  return false;
3638  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3639  return true;
3640  case VMA_SUBALLOCATION_TYPE_BUFFER:
3641  return
3642  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3643  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3644  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3645  return
3646  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3647  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3648  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3649  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3650  return
3651  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3652  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3653  return false;
3654  default:
3655  VMA_ASSERT(0);
3656  return true;
3657  }
3658 }
3659 
3660 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3661 {
3662  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3663  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3664  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3665  {
3666  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3667  }
3668 }
3669 
3670 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3671 {
3672  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3673  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3674  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3675  {
3676  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3677  {
3678  return false;
3679  }
3680  }
3681  return true;
3682 }
3683 
3684 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3685 struct VmaMutexLock
3686 {
3687  VMA_CLASS_NO_COPY(VmaMutexLock)
3688 public:
3689  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3690  m_pMutex(useMutex ? &mutex : VMA_NULL)
3691  { if(m_pMutex) { m_pMutex->Lock(); } }
3692  ~VmaMutexLock()
3693  { if(m_pMutex) { m_pMutex->Unlock(); } }
3694 private:
3695  VMA_MUTEX* m_pMutex;
3696 };
3697 
3698 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3699 struct VmaMutexLockRead
3700 {
3701  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3702 public:
3703  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3704  m_pMutex(useMutex ? &mutex : VMA_NULL)
3705  { if(m_pMutex) { m_pMutex->LockRead(); } }
3706  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3707 private:
3708  VMA_RW_MUTEX* m_pMutex;
3709 };
3710 
3711 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3712 struct VmaMutexLockWrite
3713 {
3714  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3715 public:
3716  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3717  m_pMutex(useMutex ? &mutex : VMA_NULL)
3718  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3719  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3720 private:
3721  VMA_RW_MUTEX* m_pMutex;
3722 };
3723 
3724 #if VMA_DEBUG_GLOBAL_MUTEX
3725  static VMA_MUTEX gDebugGlobalMutex;
3726  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3727 #else
3728  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3729 #endif
3730 
3731 // Minimum size of a free suballocation to register it in the free suballocation collection.
3732 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3733 
3734 /*
3735 Performs binary search and returns an iterator to the first element that is
3736 greater than or equal to (key), according to comparison (cmp).
3737 
3738 Cmp should return true if its first argument is less than its second argument.
3739 
3740 The returned iterator points to the found element, if present in the collection,
3741 or to the place where a new element with value (key) should be inserted.
3742 */
3743 template <typename CmpLess, typename IterT, typename KeyT>
3744 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3745 {
3746  size_t down = 0, up = (end - beg);
3747  while(down < up)
3748  {
3749  const size_t mid = (down + up) / 2;
3750  if(cmp(*(beg+mid), key))
3751  {
3752  down = mid + 1;
3753  }
3754  else
3755  {
3756  up = mid;
3757  }
3758  }
3759  return beg + down;
3760 }
3761 
3762 /*
3763 Returns true if all pointers in the array are non-null and unique.
3764 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3765 T must be pointer type, e.g. VmaAllocation, VmaPool.
3766 */
3767 template<typename T>
3768 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3769 {
3770  for(uint32_t i = 0; i < count; ++i)
3771  {
3772  const T iPtr = arr[i];
3773  if(iPtr == VMA_NULL)
3774  {
3775  return false;
3776  }
3777  for(uint32_t j = i + 1; j < count; ++j)
3778  {
3779  if(iPtr == arr[j])
3780  {
3781  return false;
3782  }
3783  }
3784  }
3785  return true;
3786 }
3787 
3789 // Memory allocation
3790 
3791 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3792 {
3793  if((pAllocationCallbacks != VMA_NULL) &&
3794  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3795  {
3796  return (*pAllocationCallbacks->pfnAllocation)(
3797  pAllocationCallbacks->pUserData,
3798  size,
3799  alignment,
3800  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3801  }
3802  else
3803  {
3804  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3805  }
3806 }
3807 
3808 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3809 {
3810  if((pAllocationCallbacks != VMA_NULL) &&
3811  (pAllocationCallbacks->pfnFree != VMA_NULL))
3812  {
3813  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3814  }
3815  else
3816  {
3817  VMA_SYSTEM_FREE(ptr);
3818  }
3819 }
3820 
3821 template<typename T>
3822 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3823 {
3824  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3825 }
3826 
3827 template<typename T>
3828 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3829 {
3830  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3831 }
3832 
3833 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3834 
3835 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3836 
3837 template<typename T>
3838 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3839 {
3840  ptr->~T();
3841  VmaFree(pAllocationCallbacks, ptr);
3842 }
3843 
3844 template<typename T>
3845 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3846 {
3847  if(ptr != VMA_NULL)
3848  {
3849  for(size_t i = count; i--; )
3850  {
3851  ptr[i].~T();
3852  }
3853  VmaFree(pAllocationCallbacks, ptr);
3854  }
3855 }
3856 
3857 // STL-compatible allocator.
3858 template<typename T>
3859 class VmaStlAllocator
3860 {
3861 public:
3862  const VkAllocationCallbacks* const m_pCallbacks;
3863  typedef T value_type;
3864 
3865  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3866  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3867 
3868  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3869  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3870 
3871  template<typename U>
3872  bool operator==(const VmaStlAllocator<U>& rhs) const
3873  {
3874  return m_pCallbacks == rhs.m_pCallbacks;
3875  }
3876  template<typename U>
3877  bool operator!=(const VmaStlAllocator<U>& rhs) const
3878  {
3879  return m_pCallbacks != rhs.m_pCallbacks;
3880  }
3881 
3882  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3883 };
3884 
3885 #if VMA_USE_STL_VECTOR
3886 
3887 #define VmaVector std::vector
3888 
3889 template<typename T, typename allocatorT>
3890 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3891 {
3892  vec.insert(vec.begin() + index, item);
3893 }
3894 
3895 template<typename T, typename allocatorT>
3896 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3897 {
3898  vec.erase(vec.begin() + index);
3899 }
3900 
3901 #else // #if VMA_USE_STL_VECTOR
3902 
3903 /* Class with interface compatible with subset of std::vector.
3904 T must be POD because constructors and destructors are not called and memcpy is
3905 used for these objects. */
3906 template<typename T, typename AllocatorT>
3907 class VmaVector
3908 {
3909 public:
3910  typedef T value_type;
3911 
3912  VmaVector(const AllocatorT& allocator) :
3913  m_Allocator(allocator),
3914  m_pArray(VMA_NULL),
3915  m_Count(0),
3916  m_Capacity(0)
3917  {
3918  }
3919 
3920  VmaVector(size_t count, const AllocatorT& allocator) :
3921  m_Allocator(allocator),
3922  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3923  m_Count(count),
3924  m_Capacity(count)
3925  {
3926  }
3927 
3928  VmaVector(const VmaVector<T, AllocatorT>& src) :
3929  m_Allocator(src.m_Allocator),
3930  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3931  m_Count(src.m_Count),
3932  m_Capacity(src.m_Count)
3933  {
3934  if(m_Count != 0)
3935  {
3936  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3937  }
3938  }
3939 
3940  ~VmaVector()
3941  {
3942  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3943  }
3944 
3945  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3946  {
3947  if(&rhs != this)
3948  {
3949  resize(rhs.m_Count);
3950  if(m_Count != 0)
3951  {
3952  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3953  }
3954  }
3955  return *this;
3956  }
3957 
3958  bool empty() const { return m_Count == 0; }
3959  size_t size() const { return m_Count; }
3960  T* data() { return m_pArray; }
3961  const T* data() const { return m_pArray; }
3962 
3963  T& operator[](size_t index)
3964  {
3965  VMA_HEAVY_ASSERT(index < m_Count);
3966  return m_pArray[index];
3967  }
3968  const T& operator[](size_t index) const
3969  {
3970  VMA_HEAVY_ASSERT(index < m_Count);
3971  return m_pArray[index];
3972  }
3973 
3974  T& front()
3975  {
3976  VMA_HEAVY_ASSERT(m_Count > 0);
3977  return m_pArray[0];
3978  }
3979  const T& front() const
3980  {
3981  VMA_HEAVY_ASSERT(m_Count > 0);
3982  return m_pArray[0];
3983  }
3984  T& back()
3985  {
3986  VMA_HEAVY_ASSERT(m_Count > 0);
3987  return m_pArray[m_Count - 1];
3988  }
3989  const T& back() const
3990  {
3991  VMA_HEAVY_ASSERT(m_Count > 0);
3992  return m_pArray[m_Count - 1];
3993  }
3994 
3995  void reserve(size_t newCapacity, bool freeMemory = false)
3996  {
3997  newCapacity = VMA_MAX(newCapacity, m_Count);
3998 
3999  if((newCapacity < m_Capacity) && !freeMemory)
4000  {
4001  newCapacity = m_Capacity;
4002  }
4003 
4004  if(newCapacity != m_Capacity)
4005  {
4006  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4007  if(m_Count != 0)
4008  {
4009  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4010  }
4011  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4012  m_Capacity = newCapacity;
4013  m_pArray = newArray;
4014  }
4015  }
4016 
4017  void resize(size_t newCount, bool freeMemory = false)
4018  {
4019  size_t newCapacity = m_Capacity;
4020  if(newCount > m_Capacity)
4021  {
4022  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4023  }
4024  else if(freeMemory)
4025  {
4026  newCapacity = newCount;
4027  }
4028 
4029  if(newCapacity != m_Capacity)
4030  {
4031  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4032  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4033  if(elementsToCopy != 0)
4034  {
4035  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4036  }
4037  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4038  m_Capacity = newCapacity;
4039  m_pArray = newArray;
4040  }
4041 
4042  m_Count = newCount;
4043  }
4044 
4045  void clear(bool freeMemory = false)
4046  {
4047  resize(0, freeMemory);
4048  }
4049 
4050  void insert(size_t index, const T& src)
4051  {
4052  VMA_HEAVY_ASSERT(index <= m_Count);
4053  const size_t oldCount = size();
4054  resize(oldCount + 1);
4055  if(index < oldCount)
4056  {
4057  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4058  }
4059  m_pArray[index] = src;
4060  }
4061 
4062  void remove(size_t index)
4063  {
4064  VMA_HEAVY_ASSERT(index < m_Count);
4065  const size_t oldCount = size();
4066  if(index < oldCount - 1)
4067  {
4068  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4069  }
4070  resize(oldCount - 1);
4071  }
4072 
4073  void push_back(const T& src)
4074  {
4075  const size_t newIndex = size();
4076  resize(newIndex + 1);
4077  m_pArray[newIndex] = src;
4078  }
4079 
4080  void pop_back()
4081  {
4082  VMA_HEAVY_ASSERT(m_Count > 0);
4083  resize(size() - 1);
4084  }
4085 
4086  void push_front(const T& src)
4087  {
4088  insert(0, src);
4089  }
4090 
4091  void pop_front()
4092  {
4093  VMA_HEAVY_ASSERT(m_Count > 0);
4094  remove(0);
4095  }
4096 
4097  typedef T* iterator;
4098 
4099  iterator begin() { return m_pArray; }
4100  iterator end() { return m_pArray + m_Count; }
4101 
4102 private:
4103  AllocatorT m_Allocator;
4104  T* m_pArray;
4105  size_t m_Count;
4106  size_t m_Capacity;
4107 };
4108 
4109 template<typename T, typename allocatorT>
4110 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4111 {
4112  vec.insert(index, item);
4113 }
4114 
4115 template<typename T, typename allocatorT>
4116 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4117 {
4118  vec.remove(index);
4119 }
4120 
4121 #endif // #if VMA_USE_STL_VECTOR
4122 
4123 template<typename CmpLess, typename VectorT>
4124 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4125 {
4126  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4127  vector.data(),
4128  vector.data() + vector.size(),
4129  value,
4130  CmpLess()) - vector.data();
4131  VmaVectorInsert(vector, indexToInsert, value);
4132  return indexToInsert;
4133 }
4134 
4135 template<typename CmpLess, typename VectorT>
4136 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4137 {
4138  CmpLess comparator;
4139  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4140  vector.begin(),
4141  vector.end(),
4142  value,
4143  comparator);
4144  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4145  {
4146  size_t indexToRemove = it - vector.begin();
4147  VmaVectorRemove(vector, indexToRemove);
4148  return true;
4149  }
4150  return false;
4151 }
4152 
4153 template<typename CmpLess, typename IterT, typename KeyT>
4154 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4155 {
4156  CmpLess comparator;
4157  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4158  beg, end, value, comparator);
4159  if(it == end ||
4160  (!comparator(*it, value) && !comparator(value, *it)))
4161  {
4162  return it;
4163  }
4164  return end;
4165 }
4166 
4168 // class VmaPoolAllocator
4169 
4170 /*
4171 Allocator for objects of type T using a list of arrays (pools) to speed up
4172 allocation. The number of elements that can be allocated is not bounded,
4173 because the allocator can create multiple blocks.
4174 */
4175 template<typename T>
4176 class VmaPoolAllocator
4177 {
4178  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4179 public:
4180  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4181  ~VmaPoolAllocator();
4182  void Clear();
4183  T* Alloc();
4184  void Free(T* ptr);
4185 
4186 private:
4187  union Item
4188  {
4189  uint32_t NextFreeIndex;
4190  T Value;
4191  };
4192 
4193  struct ItemBlock
4194  {
4195  Item* pItems;
4196  uint32_t FirstFreeIndex;
4197  };
4198 
4199  const VkAllocationCallbacks* m_pAllocationCallbacks;
4200  size_t m_ItemsPerBlock;
4201  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4202 
4203  ItemBlock& CreateNewBlock();
4204 };
4205 
4206 template<typename T>
4207 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4208  m_pAllocationCallbacks(pAllocationCallbacks),
4209  m_ItemsPerBlock(itemsPerBlock),
4210  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4211 {
4212  VMA_ASSERT(itemsPerBlock > 0);
4213 }
4214 
4215 template<typename T>
4216 VmaPoolAllocator<T>::~VmaPoolAllocator()
4217 {
4218  Clear();
4219 }
4220 
4221 template<typename T>
4222 void VmaPoolAllocator<T>::Clear()
4223 {
4224  for(size_t i = m_ItemBlocks.size(); i--; )
4225  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4226  m_ItemBlocks.clear();
4227 }
4228 
4229 template<typename T>
4230 T* VmaPoolAllocator<T>::Alloc()
4231 {
4232  for(size_t i = m_ItemBlocks.size(); i--; )
4233  {
4234  ItemBlock& block = m_ItemBlocks[i];
4235  // This block has some free items: Use first one.
4236  if(block.FirstFreeIndex != UINT32_MAX)
4237  {
4238  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4239  block.FirstFreeIndex = pItem->NextFreeIndex;
4240  return &pItem->Value;
4241  }
4242  }
4243 
4244  // No block has free item: Create new one and use it.
4245  ItemBlock& newBlock = CreateNewBlock();
4246  Item* const pItem = &newBlock.pItems[0];
4247  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4248  return &pItem->Value;
4249 }
4250 
4251 template<typename T>
4252 void VmaPoolAllocator<T>::Free(T* ptr)
4253 {
4254  // Search all memory blocks to find ptr.
4255  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4256  {
4257  ItemBlock& block = m_ItemBlocks[i];
4258 
4259  // Casting to union.
4260  Item* pItemPtr;
4261  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4262 
4263  // Check if pItemPtr is in address range of this block.
4264  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4265  {
4266  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4267  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4268  block.FirstFreeIndex = index;
4269  return;
4270  }
4271  }
4272  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4273 }
4274 
4275 template<typename T>
4276 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4277 {
4278  ItemBlock newBlock = {
4279  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4280 
4281  m_ItemBlocks.push_back(newBlock);
4282 
4283  // Setup singly-linked list of all free items in this block.
4284  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4285  newBlock.pItems[i].NextFreeIndex = i + 1;
4286  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4287  return m_ItemBlocks.back();
4288 }
4289 
4291 // class VmaRawList, VmaList
4292 
4293 #if VMA_USE_STL_LIST
4294 
4295 #define VmaList std::list
4296 
4297 #else // #if VMA_USE_STL_LIST
4298 
4299 template<typename T>
4300 struct VmaListItem
4301 {
4302  VmaListItem* pPrev;
4303  VmaListItem* pNext;
4304  T Value;
4305 };
4306 
4307 // Doubly linked list.
4308 template<typename T>
4309 class VmaRawList
4310 {
4311  VMA_CLASS_NO_COPY(VmaRawList)
4312 public:
4313  typedef VmaListItem<T> ItemType;
4314 
4315  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4316  ~VmaRawList();
4317  void Clear();
4318 
4319  size_t GetCount() const { return m_Count; }
4320  bool IsEmpty() const { return m_Count == 0; }
4321 
4322  ItemType* Front() { return m_pFront; }
4323  const ItemType* Front() const { return m_pFront; }
4324  ItemType* Back() { return m_pBack; }
4325  const ItemType* Back() const { return m_pBack; }
4326 
4327  ItemType* PushBack();
4328  ItemType* PushFront();
4329  ItemType* PushBack(const T& value);
4330  ItemType* PushFront(const T& value);
4331  void PopBack();
4332  void PopFront();
4333 
4334  // Item can be null - it means PushBack.
4335  ItemType* InsertBefore(ItemType* pItem);
4336  // Item can be null - it means PushFront.
4337  ItemType* InsertAfter(ItemType* pItem);
4338 
4339  ItemType* InsertBefore(ItemType* pItem, const T& value);
4340  ItemType* InsertAfter(ItemType* pItem, const T& value);
4341 
4342  void Remove(ItemType* pItem);
4343 
4344 private:
4345  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4346  VmaPoolAllocator<ItemType> m_ItemAllocator;
4347  ItemType* m_pFront;
4348  ItemType* m_pBack;
4349  size_t m_Count;
4350 };
4351 
4352 template<typename T>
4353 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4354  m_pAllocationCallbacks(pAllocationCallbacks),
4355  m_ItemAllocator(pAllocationCallbacks, 128),
4356  m_pFront(VMA_NULL),
4357  m_pBack(VMA_NULL),
4358  m_Count(0)
4359 {
4360 }
4361 
4362 template<typename T>
4363 VmaRawList<T>::~VmaRawList()
4364 {
4365  // Intentionally not calling Clear, because that would waste computation
4366  // returning all items to m_ItemAllocator as free.
4367 }
4368 
4369 template<typename T>
4370 void VmaRawList<T>::Clear()
4371 {
4372  if(IsEmpty() == false)
4373  {
4374  ItemType* pItem = m_pBack;
4375  while(pItem != VMA_NULL)
4376  {
4377  ItemType* const pPrevItem = pItem->pPrev;
4378  m_ItemAllocator.Free(pItem);
4379  pItem = pPrevItem;
4380  }
4381  m_pFront = VMA_NULL;
4382  m_pBack = VMA_NULL;
4383  m_Count = 0;
4384  }
4385 }
4386 
4387 template<typename T>
4388 VmaListItem<T>* VmaRawList<T>::PushBack()
4389 {
4390  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4391  pNewItem->pNext = VMA_NULL;
4392  if(IsEmpty())
4393  {
4394  pNewItem->pPrev = VMA_NULL;
4395  m_pFront = pNewItem;
4396  m_pBack = pNewItem;
4397  m_Count = 1;
4398  }
4399  else
4400  {
4401  pNewItem->pPrev = m_pBack;
4402  m_pBack->pNext = pNewItem;
4403  m_pBack = pNewItem;
4404  ++m_Count;
4405  }
4406  return pNewItem;
4407 }
4408 
4409 template<typename T>
4410 VmaListItem<T>* VmaRawList<T>::PushFront()
4411 {
4412  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4413  pNewItem->pPrev = VMA_NULL;
4414  if(IsEmpty())
4415  {
4416  pNewItem->pNext = VMA_NULL;
4417  m_pFront = pNewItem;
4418  m_pBack = pNewItem;
4419  m_Count = 1;
4420  }
4421  else
4422  {
4423  pNewItem->pNext = m_pFront;
4424  m_pFront->pPrev = pNewItem;
4425  m_pFront = pNewItem;
4426  ++m_Count;
4427  }
4428  return pNewItem;
4429 }
4430 
4431 template<typename T>
4432 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4433 {
4434  ItemType* const pNewItem = PushBack();
4435  pNewItem->Value = value;
4436  return pNewItem;
4437 }
4438 
4439 template<typename T>
4440 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4441 {
4442  ItemType* const pNewItem = PushFront();
4443  pNewItem->Value = value;
4444  return pNewItem;
4445 }
4446 
4447 template<typename T>
4448 void VmaRawList<T>::PopBack()
4449 {
4450  VMA_HEAVY_ASSERT(m_Count > 0);
4451  ItemType* const pBackItem = m_pBack;
4452  ItemType* const pPrevItem = pBackItem->pPrev;
4453  if(pPrevItem != VMA_NULL)
4454  {
4455  pPrevItem->pNext = VMA_NULL;
4456  }
4457  m_pBack = pPrevItem;
4458  m_ItemAllocator.Free(pBackItem);
4459  --m_Count;
4460 }
4461 
4462 template<typename T>
4463 void VmaRawList<T>::PopFront()
4464 {
4465  VMA_HEAVY_ASSERT(m_Count > 0);
4466  ItemType* const pFrontItem = m_pFront;
4467  ItemType* const pNextItem = pFrontItem->pNext;
4468  if(pNextItem != VMA_NULL)
4469  {
4470  pNextItem->pPrev = VMA_NULL;
4471  }
4472  m_pFront = pNextItem;
4473  m_ItemAllocator.Free(pFrontItem);
4474  --m_Count;
4475 }
4476 
4477 template<typename T>
4478 void VmaRawList<T>::Remove(ItemType* pItem)
4479 {
4480  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4481  VMA_HEAVY_ASSERT(m_Count > 0);
4482 
4483  if(pItem->pPrev != VMA_NULL)
4484  {
4485  pItem->pPrev->pNext = pItem->pNext;
4486  }
4487  else
4488  {
4489  VMA_HEAVY_ASSERT(m_pFront == pItem);
4490  m_pFront = pItem->pNext;
4491  }
4492 
4493  if(pItem->pNext != VMA_NULL)
4494  {
4495  pItem->pNext->pPrev = pItem->pPrev;
4496  }
4497  else
4498  {
4499  VMA_HEAVY_ASSERT(m_pBack == pItem);
4500  m_pBack = pItem->pPrev;
4501  }
4502 
4503  m_ItemAllocator.Free(pItem);
4504  --m_Count;
4505 }
4506 
4507 template<typename T>
4508 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4509 {
4510  if(pItem != VMA_NULL)
4511  {
4512  ItemType* const prevItem = pItem->pPrev;
4513  ItemType* const newItem = m_ItemAllocator.Alloc();
4514  newItem->pPrev = prevItem;
4515  newItem->pNext = pItem;
4516  pItem->pPrev = newItem;
4517  if(prevItem != VMA_NULL)
4518  {
4519  prevItem->pNext = newItem;
4520  }
4521  else
4522  {
4523  VMA_HEAVY_ASSERT(m_pFront == pItem);
4524  m_pFront = newItem;
4525  }
4526  ++m_Count;
4527  return newItem;
4528  }
4529  else
4530  return PushBack();
4531 }
4532 
4533 template<typename T>
4534 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4535 {
4536  if(pItem != VMA_NULL)
4537  {
4538  ItemType* const nextItem = pItem->pNext;
4539  ItemType* const newItem = m_ItemAllocator.Alloc();
4540  newItem->pNext = nextItem;
4541  newItem->pPrev = pItem;
4542  pItem->pNext = newItem;
4543  if(nextItem != VMA_NULL)
4544  {
4545  nextItem->pPrev = newItem;
4546  }
4547  else
4548  {
4549  VMA_HEAVY_ASSERT(m_pBack == pItem);
4550  m_pBack = newItem;
4551  }
4552  ++m_Count;
4553  return newItem;
4554  }
4555  else
4556  return PushFront();
4557 }
4558 
4559 template<typename T>
4560 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4561 {
4562  ItemType* const newItem = InsertBefore(pItem);
4563  newItem->Value = value;
4564  return newItem;
4565 }
4566 
4567 template<typename T>
4568 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4569 {
4570  ItemType* const newItem = InsertAfter(pItem);
4571  newItem->Value = value;
4572  return newItem;
4573 }
4574 
4575 template<typename T, typename AllocatorT>
4576 class VmaList
4577 {
4578  VMA_CLASS_NO_COPY(VmaList)
4579 public:
4580  class iterator
4581  {
4582  public:
4583  iterator() :
4584  m_pList(VMA_NULL),
4585  m_pItem(VMA_NULL)
4586  {
4587  }
4588 
4589  T& operator*() const
4590  {
4591  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4592  return m_pItem->Value;
4593  }
4594  T* operator->() const
4595  {
4596  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4597  return &m_pItem->Value;
4598  }
4599 
4600  iterator& operator++()
4601  {
4602  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4603  m_pItem = m_pItem->pNext;
4604  return *this;
4605  }
4606  iterator& operator--()
4607  {
4608  if(m_pItem != VMA_NULL)
4609  {
4610  m_pItem = m_pItem->pPrev;
4611  }
4612  else
4613  {
4614  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4615  m_pItem = m_pList->Back();
4616  }
4617  return *this;
4618  }
4619 
4620  iterator operator++(int)
4621  {
4622  iterator result = *this;
4623  ++*this;
4624  return result;
4625  }
4626  iterator operator--(int)
4627  {
4628  iterator result = *this;
4629  --*this;
4630  return result;
4631  }
4632 
4633  bool operator==(const iterator& rhs) const
4634  {
4635  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4636  return m_pItem == rhs.m_pItem;
4637  }
4638  bool operator!=(const iterator& rhs) const
4639  {
4640  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4641  return m_pItem != rhs.m_pItem;
4642  }
4643 
4644  private:
4645  VmaRawList<T>* m_pList;
4646  VmaListItem<T>* m_pItem;
4647 
4648  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4649  m_pList(pList),
4650  m_pItem(pItem)
4651  {
4652  }
4653 
4654  friend class VmaList<T, AllocatorT>;
4655  };
4656 
4657  class const_iterator
4658  {
4659  public:
4660  const_iterator() :
4661  m_pList(VMA_NULL),
4662  m_pItem(VMA_NULL)
4663  {
4664  }
4665 
4666  const_iterator(const iterator& src) :
4667  m_pList(src.m_pList),
4668  m_pItem(src.m_pItem)
4669  {
4670  }
4671 
4672  const T& operator*() const
4673  {
4674  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4675  return m_pItem->Value;
4676  }
4677  const T* operator->() const
4678  {
4679  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4680  return &m_pItem->Value;
4681  }
4682 
4683  const_iterator& operator++()
4684  {
4685  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4686  m_pItem = m_pItem->pNext;
4687  return *this;
4688  }
4689  const_iterator& operator--()
4690  {
4691  if(m_pItem != VMA_NULL)
4692  {
4693  m_pItem = m_pItem->pPrev;
4694  }
4695  else
4696  {
4697  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4698  m_pItem = m_pList->Back();
4699  }
4700  return *this;
4701  }
4702 
4703  const_iterator operator++(int)
4704  {
4705  const_iterator result = *this;
4706  ++*this;
4707  return result;
4708  }
4709  const_iterator operator--(int)
4710  {
4711  const_iterator result = *this;
4712  --*this;
4713  return result;
4714  }
4715 
4716  bool operator==(const const_iterator& rhs) const
4717  {
4718  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4719  return m_pItem == rhs.m_pItem;
4720  }
4721  bool operator!=(const const_iterator& rhs) const
4722  {
4723  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4724  return m_pItem != rhs.m_pItem;
4725  }
4726 
4727  private:
4728  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4729  m_pList(pList),
4730  m_pItem(pItem)
4731  {
4732  }
4733 
4734  const VmaRawList<T>* m_pList;
4735  const VmaListItem<T>* m_pItem;
4736 
4737  friend class VmaList<T, AllocatorT>;
4738  };
4739 
4740  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4741 
4742  bool empty() const { return m_RawList.IsEmpty(); }
4743  size_t size() const { return m_RawList.GetCount(); }
4744 
4745  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4746  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4747 
4748  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4749  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4750 
4751  void clear() { m_RawList.Clear(); }
4752  void push_back(const T& value) { m_RawList.PushBack(value); }
4753  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4754  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4755 
4756 private:
4757  VmaRawList<T> m_RawList;
4758 };
4759 
4760 #endif // #if VMA_USE_STL_LIST
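/*
Illustration - not part of the library source: typical in-library use of the
std::list-like wrapper above, assuming the VmaStlAllocator defined earlier in
this file. Note that --end() is valid and yields the last element, thanks to
the special case in iterator::operator--().
*/
#if 0 // Illustration only.
static void VmaListUsageSketch(const VkAllocationCallbacks* pAllocationCallbacks)
{
    typedef VmaStlAllocator<int> AllocT;
    AllocT alloc(pAllocationCallbacks);
    VmaList<int, AllocT> list(alloc);
    list.push_back(1);
    list.push_back(2);
    for(VmaList<int, AllocT>::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it visits 1, then 2.
    }
}
#endif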
4761 
4762 ////////////////////////////////////////////////////////////////////////////////
4763 // class VmaMap
4764 
4765 // Unused in this version.
4766 #if 0
4767 
4768 #if VMA_USE_STL_UNORDERED_MAP
4769 
4770 #define VmaPair std::pair
4771 
4772 #define VMA_MAP_TYPE(KeyT, ValueT) \
4773  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4774 
4775 #else // #if VMA_USE_STL_UNORDERED_MAP
4776 
4777 template<typename T1, typename T2>
4778 struct VmaPair
4779 {
4780  T1 first;
4781  T2 second;
4782 
4783  VmaPair() : first(), second() { }
4784  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4785 };
4786 
4787 /* Class compatible with a subset of the interface of std::unordered_map.
4788 KeyT, ValueT must be POD because they will be stored in VmaVector.
4789 */
4790 template<typename KeyT, typename ValueT>
4791 class VmaMap
4792 {
4793 public:
4794  typedef VmaPair<KeyT, ValueT> PairType;
4795  typedef PairType* iterator;
4796 
4797  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4798 
4799  iterator begin() { return m_Vector.begin(); }
4800  iterator end() { return m_Vector.end(); }
4801 
4802  void insert(const PairType& pair);
4803  iterator find(const KeyT& key);
4804  void erase(iterator it);
4805 
4806 private:
4807  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4808 };
4809 
4810 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4811 
4812 template<typename FirstT, typename SecondT>
4813 struct VmaPairFirstLess
4814 {
4815  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4816  {
4817  return lhs.first < rhs.first;
4818  }
4819  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4820  {
4821  return lhs.first < rhsFirst;
4822  }
4823 };
4824 
4825 template<typename KeyT, typename ValueT>
4826 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4827 {
4828  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4829  m_Vector.data(),
4830  m_Vector.data() + m_Vector.size(),
4831  pair,
4832  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4833  VmaVectorInsert(m_Vector, indexToInsert, pair);
4834 }
4835 
4836 template<typename KeyT, typename ValueT>
4837 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4838 {
4839  PairType* it = VmaBinaryFindFirstNotLess(
4840  m_Vector.data(),
4841  m_Vector.data() + m_Vector.size(),
4842  key,
4843  VmaPairFirstLess<KeyT, ValueT>());
4844  if((it != m_Vector.end()) && (it->first == key))
4845  {
4846  return it;
4847  }
4848  else
4849  {
4850  return m_Vector.end();
4851  }
4852 }
4853 
4854 template<typename KeyT, typename ValueT>
4855 void VmaMap<KeyT, ValueT>::erase(iterator it)
4856 {
4857  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4858 }
4859 
4860 #endif // #if VMA_USE_STL_UNORDERED_MAP
4861 
4862 #endif // #if 0
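/*
Illustration - not part of the library source: the VmaMap above is a
sorted-vector map. insert() and find() are binary searches via
VmaBinaryFindFirstNotLess, so lookups are O(log n) and insertions O(n), which
can beat a hash map for the small, POD-only key sets it targets. The same idea
with standard containers:
*/
#if 0 // Illustration only.
#include <algorithm>
#include <vector>
static void VmaSortedVectorMapSketch()
{
    std::vector< std::pair<int, int> > v; // kept sorted by .first
    const std::pair<int, int> item(42, 7);
    // Same position VmaMap::insert computes; std::pair's operator< compares .first first.
    std::vector< std::pair<int, int> >::iterator it =
        std::lower_bound(v.begin(), v.end(), item);
    v.insert(it, item);
}
#endif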
4863 
4864 ////////////////////////////////////////////////////////////////////////////////
4865 
4866 class VmaDeviceMemoryBlock;
4867 
4868 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4869 
4870 struct VmaAllocation_T
4871 {
4872  VMA_CLASS_NO_COPY(VmaAllocation_T)
4873 private:
4874  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4875 
4876  enum FLAGS
4877  {
4878  FLAG_USER_DATA_STRING = 0x01,
4879  };
4880 
4881 public:
4882  enum ALLOCATION_TYPE
4883  {
4884  ALLOCATION_TYPE_NONE,
4885  ALLOCATION_TYPE_BLOCK,
4886  ALLOCATION_TYPE_DEDICATED,
4887  };
4888 
4889  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4890  m_Alignment(1),
4891  m_Size(0),
4892  m_pUserData(VMA_NULL),
4893  m_LastUseFrameIndex(currentFrameIndex),
4894  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4895  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4896  m_MapCount(0),
4897  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4898  {
4899 #if VMA_STATS_STRING_ENABLED
4900  m_CreationFrameIndex = currentFrameIndex;
4901  m_BufferImageUsage = 0;
4902 #endif
4903  }
4904 
4905  ~VmaAllocation_T()
4906  {
4907  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4908 
4909  // Check if owned string was freed.
4910  VMA_ASSERT(m_pUserData == VMA_NULL);
4911  }
4912 
4913  void InitBlockAllocation(
4914  VmaPool hPool,
4915  VmaDeviceMemoryBlock* block,
4916  VkDeviceSize offset,
4917  VkDeviceSize alignment,
4918  VkDeviceSize size,
4919  VmaSuballocationType suballocationType,
4920  bool mapped,
4921  bool canBecomeLost)
4922  {
4923  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4924  VMA_ASSERT(block != VMA_NULL);
4925  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4926  m_Alignment = alignment;
4927  m_Size = size;
4928  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4929  m_SuballocationType = (uint8_t)suballocationType;
4930  m_BlockAllocation.m_hPool = hPool;
4931  m_BlockAllocation.m_Block = block;
4932  m_BlockAllocation.m_Offset = offset;
4933  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4934  }
4935 
4936  void InitLost()
4937  {
4938  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4939  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4940  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4941  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4942  m_BlockAllocation.m_Block = VMA_NULL;
4943  m_BlockAllocation.m_Offset = 0;
4944  m_BlockAllocation.m_CanBecomeLost = true;
4945  }
4946 
4947  void ChangeBlockAllocation(
4948  VmaAllocator hAllocator,
4949  VmaDeviceMemoryBlock* block,
4950  VkDeviceSize offset);
4951 
4952  void ChangeSize(VkDeviceSize newSize);
4953  void ChangeOffset(VkDeviceSize newOffset);
4954 
4955  // A non-null pMappedData means the allocation is created with the MAPPED flag.
4956  void InitDedicatedAllocation(
4957  uint32_t memoryTypeIndex,
4958  VkDeviceMemory hMemory,
4959  VmaSuballocationType suballocationType,
4960  void* pMappedData,
4961  VkDeviceSize size)
4962  {
4963  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4964  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4965  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4966  m_Alignment = 0;
4967  m_Size = size;
4968  m_SuballocationType = (uint8_t)suballocationType;
4969  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4970  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4971  m_DedicatedAllocation.m_hMemory = hMemory;
4972  m_DedicatedAllocation.m_pMappedData = pMappedData;
4973  }
4974 
4975  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4976  VkDeviceSize GetAlignment() const { return m_Alignment; }
4977  VkDeviceSize GetSize() const { return m_Size; }
4978  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4979  void* GetUserData() const { return m_pUserData; }
4980  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4981  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4982 
4983  VmaDeviceMemoryBlock* GetBlock() const
4984  {
4985  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4986  return m_BlockAllocation.m_Block;
4987  }
4988  VkDeviceSize GetOffset() const;
4989  VkDeviceMemory GetMemory() const;
4990  uint32_t GetMemoryTypeIndex() const;
4991  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4992  void* GetMappedData() const;
4993  bool CanBecomeLost() const;
4994  VmaPool GetPool() const;
4995 
4996  uint32_t GetLastUseFrameIndex() const
4997  {
4998  return m_LastUseFrameIndex.load();
4999  }
5000  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5001  {
5002  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5003  }
5004  /*
5005  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5006  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5007  - Else, returns false.
5008 
5009  If hAllocation is already lost, asserts - you should not call this function then.
5010  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
5011  */
5012  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5013 
5014  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5015  {
5016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5017  outInfo.blockCount = 1;
5018  outInfo.allocationCount = 1;
5019  outInfo.unusedRangeCount = 0;
5020  outInfo.usedBytes = m_Size;
5021  outInfo.unusedBytes = 0;
5022  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5023  outInfo.unusedRangeSizeMin = UINT64_MAX;
5024  outInfo.unusedRangeSizeMax = 0;
5025  }
5026 
5027  void BlockAllocMap();
5028  void BlockAllocUnmap();
5029  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5030  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5031 
5032 #if VMA_STATS_STRING_ENABLED
5033  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5034  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5035 
5036  void InitBufferImageUsage(uint32_t bufferImageUsage)
5037  {
5038  VMA_ASSERT(m_BufferImageUsage == 0);
5039  m_BufferImageUsage = bufferImageUsage;
5040  }
5041 
5042  void PrintParameters(class VmaJsonWriter& json) const;
5043 #endif
5044 
5045 private:
5046  VkDeviceSize m_Alignment;
5047  VkDeviceSize m_Size;
5048  void* m_pUserData;
5049  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5050  uint8_t m_Type; // ALLOCATION_TYPE
5051  uint8_t m_SuballocationType; // VmaSuballocationType
5052  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5053  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5054  uint8_t m_MapCount;
5055  uint8_t m_Flags; // enum FLAGS
5056 
5057  // Allocation out of VmaDeviceMemoryBlock.
5058  struct BlockAllocation
5059  {
5060  VmaPool m_hPool; // Null if belongs to general memory.
5061  VmaDeviceMemoryBlock* m_Block;
5062  VkDeviceSize m_Offset;
5063  bool m_CanBecomeLost;
5064  };
5065 
5066  // Allocation for an object that has its own private VkDeviceMemory.
5067  struct DedicatedAllocation
5068  {
5069  uint32_t m_MemoryTypeIndex;
5070  VkDeviceMemory m_hMemory;
5071  void* m_pMappedData; // Not null means memory is mapped.
5072  };
5073 
5074  union
5075  {
5076  // Allocation out of VmaDeviceMemoryBlock.
5077  BlockAllocation m_BlockAllocation;
5078  // Allocation for an object that has its own private VkDeviceMemory.
5079  DedicatedAllocation m_DedicatedAllocation;
5080  };
5081 
5082 #if VMA_STATS_STRING_ENABLED
5083  uint32_t m_CreationFrameIndex;
5084  uint32_t m_BufferImageUsage; // 0 if unknown.
5085 #endif
5086 
5087  void FreeUserDataString(VmaAllocator hAllocator);
5088 };
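/*
Illustration - not part of the library source: how the m_MapCount byte above
packs two facts, per the comments on its declaration - bit 0x80 marks a
persistently mapped allocation, and the low 7 bits count
vmaMapMemory()/vmaUnmapMemory() nesting.
*/
#if 0 // Illustration only.
static void VmaMapCountSketch()
{
    uint8_t mapCount = 0x80;                        // created with MAPPED flag, no user maps yet
    const bool persistent = (mapCount & 0x80) != 0; // true
    uint8_t userMapCount = mapCount & 0x7F;         // 0
    ++mapCount;                                     // vmaMapMemory(): user map count becomes 1
    userMapCount = mapCount & 0x7F;                 // 1
    (void)persistent; (void)userMapCount;
}
#endif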
5089 
5090 /*
5091 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
5092 allocation and returned as allocated memory, or free.
5093 */
5094 struct VmaSuballocation
5095 {
5096  VkDeviceSize offset;
5097  VkDeviceSize size;
5098  VmaAllocation hAllocation;
5099  VmaSuballocationType type;
5100 };
5101 
5102 // Comparator for offsets.
5103 struct VmaSuballocationOffsetLess
5104 {
5105  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5106  {
5107  return lhs.offset < rhs.offset;
5108  }
5109 };
5110 struct VmaSuballocationOffsetGreater
5111 {
5112  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5113  {
5114  return lhs.offset > rhs.offset;
5115  }
5116 };
5117 
5118 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5119 
5120 // Cost of making one more allocation lost, expressed as an equivalent number of bytes.
5121 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5122 
5123 /*
5124 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5125 
5126 If canMakeOtherLost was false:
5127 - item points to a FREE suballocation.
5128 - itemsToMakeLostCount is 0.
5129 
5130 If canMakeOtherLost was true:
5131 - item points to the first of a sequence of suballocations, each of which is
5132  either FREE or points to a VmaAllocation that can become lost.
5133 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5134  the requested allocation to succeed.
5135 */
5136 struct VmaAllocationRequest
5137 {
5138  VkDeviceSize offset;
5139  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5140  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5141  VmaSuballocationList::iterator item;
5142  size_t itemsToMakeLostCount;
5143  void* customData;
5144 
5145  VkDeviceSize CalcCost() const
5146  {
5147  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5148  }
5149 };
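// Worked example of CalcCost() above (illustration, not library source): with
// sumItemSize = 262144 and itemsToMakeLostCount = 2, the cost is
// 262144 + 2 * 1048576 = 2359296 bytes, i.e. sacrificing two lost-able
// allocations is priced at 2 MiB on top of the bytes actually overlapped.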
5150 
5151 /*
5152 Data structure used for bookkeeping of allocations and unused ranges of memory
5153 in a single VkDeviceMemory block.
5154 */
5155 class VmaBlockMetadata
5156 {
5157 public:
5158  VmaBlockMetadata(VmaAllocator hAllocator);
5159  virtual ~VmaBlockMetadata() { }
5160  virtual void Init(VkDeviceSize size) { m_Size = size; }
5161 
5162  // Validates all data structures inside this object. If not valid, returns false.
5163  virtual bool Validate() const = 0;
5164  VkDeviceSize GetSize() const { return m_Size; }
5165  virtual size_t GetAllocationCount() const = 0;
5166  virtual VkDeviceSize GetSumFreeSize() const = 0;
5167  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5168  // Returns true if this block is empty - contains only a single free suballocation.
5169  virtual bool IsEmpty() const = 0;
5170 
5171  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5172  // Shouldn't modify blockCount.
5173  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5174 
5175 #if VMA_STATS_STRING_ENABLED
5176  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5177 #endif
5178 
5179  // Tries to find a place for suballocation with given parameters inside this block.
5180  // If succeeded, fills pAllocationRequest and returns true.
5181  // If failed, returns false.
5182  virtual bool CreateAllocationRequest(
5183  uint32_t currentFrameIndex,
5184  uint32_t frameInUseCount,
5185  VkDeviceSize bufferImageGranularity,
5186  VkDeviceSize allocSize,
5187  VkDeviceSize allocAlignment,
5188  bool upperAddress,
5189  VmaSuballocationType allocType,
5190  bool canMakeOtherLost,
5191  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5192  uint32_t strategy,
5193  VmaAllocationRequest* pAllocationRequest) = 0;
5194 
5195  virtual bool MakeRequestedAllocationsLost(
5196  uint32_t currentFrameIndex,
5197  uint32_t frameInUseCount,
5198  VmaAllocationRequest* pAllocationRequest) = 0;
5199 
5200  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5201 
5202  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5203 
5204  // Makes actual allocation based on request. Request must already be checked and valid.
5205  virtual void Alloc(
5206  const VmaAllocationRequest& request,
5207  VmaSuballocationType type,
5208  VkDeviceSize allocSize,
5209  bool upperAddress,
5210  VmaAllocation hAllocation) = 0;
5211 
5212  // Frees suballocation assigned to given memory region.
5213  virtual void Free(const VmaAllocation allocation) = 0;
5214  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5215 
5216  // Tries to resize (grow or shrink) space for given allocation, in place.
5217  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5218 
5219 protected:
5220  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5221 
5222 #if VMA_STATS_STRING_ENABLED
5223  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5224  VkDeviceSize unusedBytes,
5225  size_t allocationCount,
5226  size_t unusedRangeCount) const;
5227  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5228  VkDeviceSize offset,
5229  VmaAllocation hAllocation) const;
5230  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5231  VkDeviceSize offset,
5232  VkDeviceSize size) const;
5233  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5234 #endif
5235 
5236 private:
5237  VkDeviceSize m_Size;
5238  const VkAllocationCallbacks* m_pAllocationCallbacks;
5239 };
5240 
5241 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5242  VMA_ASSERT(0 && "Validation failed: " #cond); \
5243  return false; \
5244  } } while(false)
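/*
Illustration - not part of the library source: typical use of VMA_VALIDATE
inside a Validate() override. Each failed condition asserts in debug builds
and makes the function return false instead of continuing on corrupt data.
The parameters below are hypothetical, for illustration only.
*/
#if 0 // Illustration only.
static bool ValidateSketch(size_t freeCount, size_t suballocCount,
    VkDeviceSize sumFreeSize, VkDeviceSize blockSize)
{
    VMA_VALIDATE(freeCount <= suballocCount);
    VMA_VALIDATE(sumFreeSize <= blockSize);
    return true;
}
#endif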
5245 
5246 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5247 {
5248  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5249 public:
5250  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5251  virtual ~VmaBlockMetadata_Generic();
5252  virtual void Init(VkDeviceSize size);
5253 
5254  virtual bool Validate() const;
5255  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5256  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5257  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5258  virtual bool IsEmpty() const;
5259 
5260  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5261  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5262 
5263 #if VMA_STATS_STRING_ENABLED
5264  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5265 #endif
5266 
5267  virtual bool CreateAllocationRequest(
5268  uint32_t currentFrameIndex,
5269  uint32_t frameInUseCount,
5270  VkDeviceSize bufferImageGranularity,
5271  VkDeviceSize allocSize,
5272  VkDeviceSize allocAlignment,
5273  bool upperAddress,
5274  VmaSuballocationType allocType,
5275  bool canMakeOtherLost,
5276  uint32_t strategy,
5277  VmaAllocationRequest* pAllocationRequest);
5278 
5279  virtual bool MakeRequestedAllocationsLost(
5280  uint32_t currentFrameIndex,
5281  uint32_t frameInUseCount,
5282  VmaAllocationRequest* pAllocationRequest);
5283 
5284  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5285 
5286  virtual VkResult CheckCorruption(const void* pBlockData);
5287 
5288  virtual void Alloc(
5289  const VmaAllocationRequest& request,
5290  VmaSuballocationType type,
5291  VkDeviceSize allocSize,
5292  bool upperAddress,
5293  VmaAllocation hAllocation);
5294 
5295  virtual void Free(const VmaAllocation allocation);
5296  virtual void FreeAtOffset(VkDeviceSize offset);
5297 
5298  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5299 
5300  ////////////////////////////////////////////////////////////////////////////////
5301  // For defragmentation
5302 
5303  bool IsBufferImageGranularityConflictPossible(
5304  VkDeviceSize bufferImageGranularity,
5305  VmaSuballocationType& inOutPrevSuballocType) const;
5306 
5307 private:
5308  friend class VmaDefragmentationAlgorithm_Generic;
5309  friend class VmaDefragmentationAlgorithm_Fast;
5310 
5311  uint32_t m_FreeCount;
5312  VkDeviceSize m_SumFreeSize;
5313  VmaSuballocationList m_Suballocations;
5314  // Suballocations that are free and have size greater than certain threshold.
5315  // Sorted by size, ascending.
5316  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5317 
5318  bool ValidateFreeSuballocationList() const;
5319 
5320  // Checks whether a requested suballocation with the given parameters can be placed in the given suballocItem.
5321  // If yes, fills pOffset and returns true. If no, returns false.
5322  bool CheckAllocation(
5323  uint32_t currentFrameIndex,
5324  uint32_t frameInUseCount,
5325  VkDeviceSize bufferImageGranularity,
5326  VkDeviceSize allocSize,
5327  VkDeviceSize allocAlignment,
5328  VmaSuballocationType allocType,
5329  VmaSuballocationList::const_iterator suballocItem,
5330  bool canMakeOtherLost,
5331  VkDeviceSize* pOffset,
5332  size_t* itemsToMakeLostCount,
5333  VkDeviceSize* pSumFreeSize,
5334  VkDeviceSize* pSumItemSize) const;
5335  // Given a free suballocation, merges it with the following one, which must also be free.
5336  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5337  // Releases given suballocation, making it free.
5338  // Merges it with adjacent free suballocations if applicable.
5339  // Returns iterator to new free suballocation at this place.
5340  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5341  // Given a free suballocation, inserts it into the sorted list
5342  // m_FreeSuballocationsBySize if it is large enough to qualify.
5343  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5344  // Given a free suballocation, removes it from the sorted list
5345  // m_FreeSuballocationsBySize if it was registered there.
5346  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5347 };
5348 
5349 /*
5350 Allocations and their references in internal data structure look like this:
5351 
5352 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5353 
5354  0 +-------+
5355  | |
5356  | |
5357  | |
5358  +-------+
5359  | Alloc | 1st[m_1stNullItemsBeginCount]
5360  +-------+
5361  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5362  +-------+
5363  | ... |
5364  +-------+
5365  | Alloc | 1st[1st.size() - 1]
5366  +-------+
5367  | |
5368  | |
5369  | |
5370 GetSize() +-------+
5371 
5372 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5373 
5374  0 +-------+
5375  | Alloc | 2nd[0]
5376  +-------+
5377  | Alloc | 2nd[1]
5378  +-------+
5379  | ... |
5380  +-------+
5381  | Alloc | 2nd[2nd.size() - 1]
5382  +-------+
5383  | |
5384  | |
5385  | |
5386  +-------+
5387  | Alloc | 1st[m_1stNullItemsBeginCount]
5388  +-------+
5389  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5390  +-------+
5391  | ... |
5392  +-------+
5393  | Alloc | 1st[1st.size() - 1]
5394  +-------+
5395  | |
5396 GetSize() +-------+
5397 
5398 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5399 
5400  0 +-------+
5401  | |
5402  | |
5403  | |
5404  +-------+
5405  | Alloc | 1st[m_1stNullItemsBeginCount]
5406  +-------+
5407  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5408  +-------+
5409  | ... |
5410  +-------+
5411  | Alloc | 1st[1st.size() - 1]
5412  +-------+
5413  | |
5414  | |
5415  | |
5416  +-------+
5417  | Alloc | 2nd[2nd.size() - 1]
5418  +-------+
5419  | ... |
5420  +-------+
5421  | Alloc | 2nd[1]
5422  +-------+
5423  | Alloc | 2nd[0]
5424 GetSize() +-------+
5425 
5426 */
5427 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5428 {
5429  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5430 public:
5431  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5432  virtual ~VmaBlockMetadata_Linear();
5433  virtual void Init(VkDeviceSize size);
5434 
5435  virtual bool Validate() const;
5436  virtual size_t GetAllocationCount() const;
5437  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5438  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5439  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5440 
5441  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5442  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5443 
5444 #if VMA_STATS_STRING_ENABLED
5445  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5446 #endif
5447 
5448  virtual bool CreateAllocationRequest(
5449  uint32_t currentFrameIndex,
5450  uint32_t frameInUseCount,
5451  VkDeviceSize bufferImageGranularity,
5452  VkDeviceSize allocSize,
5453  VkDeviceSize allocAlignment,
5454  bool upperAddress,
5455  VmaSuballocationType allocType,
5456  bool canMakeOtherLost,
5457  uint32_t strategy,
5458  VmaAllocationRequest* pAllocationRequest);
5459 
5460  virtual bool MakeRequestedAllocationsLost(
5461  uint32_t currentFrameIndex,
5462  uint32_t frameInUseCount,
5463  VmaAllocationRequest* pAllocationRequest);
5464 
5465  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5466 
5467  virtual VkResult CheckCorruption(const void* pBlockData);
5468 
5469  virtual void Alloc(
5470  const VmaAllocationRequest& request,
5471  VmaSuballocationType type,
5472  VkDeviceSize allocSize,
5473  bool upperAddress,
5474  VmaAllocation hAllocation);
5475 
5476  virtual void Free(const VmaAllocation allocation);
5477  virtual void FreeAtOffset(VkDeviceSize offset);
5478 
5479 private:
5480  /*
5481  There are two suballocation vectors, used in ping-pong way.
5482  The one with index m_1stVectorIndex is called 1st.
5483  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5484  2nd can be non-empty only when 1st is not empty.
5485  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5486  */
5487  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5488 
5489  enum SECOND_VECTOR_MODE
5490  {
5491  SECOND_VECTOR_EMPTY,
5492  /*
5493  Suballocations in 2nd vector are created later than the ones in 1st, but they
5494  all have smaller offsets.
5495  */
5496  SECOND_VECTOR_RING_BUFFER,
5497  /*
5498  Suballocations in 2nd vector form the upper side of a double stack.
5499  They all have offsets higher than those in 1st vector.
5500  The top of this stack corresponds to smaller offsets but higher indices in this vector.
5501  */
5502  SECOND_VECTOR_DOUBLE_STACK,
5503  };
5504 
5505  VkDeviceSize m_SumFreeSize;
5506  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5507  uint32_t m_1stVectorIndex;
5508  SECOND_VECTOR_MODE m_2ndVectorMode;
5509 
5510  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5511  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5512  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5513  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5514 
5515  // Number of items in 1st vector with hAllocation = null at the beginning.
5516  size_t m_1stNullItemsBeginCount;
5517  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5518  size_t m_1stNullItemsMiddleCount;
5519  // Number of items in 2nd vector with hAllocation = null.
5520  size_t m_2ndNullItemsCount;
5521 
5522  bool ShouldCompact1st() const;
5523  void CleanupAfterFree();
5524 };
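/*
Illustration - an assumed sequence showing how the modes pictured in the
diagram above can arise:
1. Allocate A, B, C - all go to 1st => SECOND_VECTOR_EMPTY.
2. Free A (at the beginning), then allocate D that no longer fits after C:
   D is placed at offset 0 in 2nd => SECOND_VECTOR_RING_BUFFER.
3. Alternatively, allocate E with upperAddress = true: E is placed at the high
   end of the block in 2nd => SECOND_VECTOR_DOUBLE_STACK.
*/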
5525 
5526 /*
5527 - GetSize() is the original size of allocated memory block.
5528 - m_UsableSize is this size aligned down to a power of two.
5529  All allocations and calculations happen relative to m_UsableSize.
5530 - GetUnusableSize() is the difference between them.
5531  It is reported as a separate, unused range, not available for allocations.
5532 
5533 Node at level 0 has size = m_UsableSize.
5534 Each subsequent level contains nodes half the size of the previous level.
5535 m_LevelCount is the maximum number of levels to use in the current object.
5536 */
5537 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5538 {
5539  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5540 public:
5541  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5542  virtual ~VmaBlockMetadata_Buddy();
5543  virtual void Init(VkDeviceSize size);
5544 
5545  virtual bool Validate() const;
5546  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5547  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5548  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5549  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5550 
5551  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5552  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5553 
5554 #if VMA_STATS_STRING_ENABLED
5555  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5556 #endif
5557 
5558  virtual bool CreateAllocationRequest(
5559  uint32_t currentFrameIndex,
5560  uint32_t frameInUseCount,
5561  VkDeviceSize bufferImageGranularity,
5562  VkDeviceSize allocSize,
5563  VkDeviceSize allocAlignment,
5564  bool upperAddress,
5565  VmaSuballocationType allocType,
5566  bool canMakeOtherLost,
5567  uint32_t strategy,
5568  VmaAllocationRequest* pAllocationRequest);
5569 
5570  virtual bool MakeRequestedAllocationsLost(
5571  uint32_t currentFrameIndex,
5572  uint32_t frameInUseCount,
5573  VmaAllocationRequest* pAllocationRequest);
5574 
5575  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5576 
5577  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5578 
5579  virtual void Alloc(
5580  const VmaAllocationRequest& request,
5581  VmaSuballocationType type,
5582  VkDeviceSize allocSize,
5583  bool upperAddress,
5584  VmaAllocation hAllocation);
5585 
5586  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5587  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5588 
5589 private:
5590  static const VkDeviceSize MIN_NODE_SIZE = 32;
5591  static const size_t MAX_LEVELS = 30;
5592 
5593  struct ValidationContext
5594  {
5595  size_t calculatedAllocationCount;
5596  size_t calculatedFreeCount;
5597  VkDeviceSize calculatedSumFreeSize;
5598 
5599  ValidationContext() :
5600  calculatedAllocationCount(0),
5601  calculatedFreeCount(0),
5602  calculatedSumFreeSize(0) { }
5603  };
5604 
5605  struct Node
5606  {
5607  VkDeviceSize offset;
5608  enum TYPE
5609  {
5610  TYPE_FREE,
5611  TYPE_ALLOCATION,
5612  TYPE_SPLIT,
5613  TYPE_COUNT
5614  } type;
5615  Node* parent;
5616  Node* buddy;
5617 
5618  union
5619  {
5620  struct
5621  {
5622  Node* prev;
5623  Node* next;
5624  } free;
5625  struct
5626  {
5627  VmaAllocation alloc;
5628  } allocation;
5629  struct
5630  {
5631  Node* leftChild;
5632  } split;
5633  };
5634  };
5635 
5636  // Size of the memory block aligned down to a power of two.
5637  VkDeviceSize m_UsableSize;
5638  uint32_t m_LevelCount;
5639 
5640  Node* m_Root;
5641  struct {
5642  Node* front;
5643  Node* back;
5644  } m_FreeList[MAX_LEVELS];
5645  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5646  size_t m_AllocationCount;
5647  // Number of nodes in the tree with type == TYPE_FREE.
5648  size_t m_FreeCount;
5649  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5650  VkDeviceSize m_SumFreeSize;
5651 
5652  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5653  void DeleteNode(Node* node);
5654  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5655  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5656  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5657  // Alloc passed just for validation. Can be null.
5658  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5659  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5660  // Adds node to the front of FreeList at given level.
5661  // node->type must be FREE.
5662  // node->free.prev, next can be undefined.
5663  void AddToFreeListFront(uint32_t level, Node* node);
5664  // Removes node from FreeList at given level.
5665  // node->type must be FREE.
5666  // node->free.prev, next stay untouched.
5667  void RemoveFromFreeList(uint32_t level, Node* node);
5668 
5669 #if VMA_STATS_STRING_ENABLED
5670  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5671 #endif
5672 };
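/*
Worked example of the level arithmetic above (illustration, not library
source): with m_UsableSize = 1048576 (1 MiB), LevelToNodeSize(0) = 1048576 and
LevelToNodeSize(3) = 131072. An allocation of 100000 bytes goes to level 3,
the deepest level whose node still fits it (level 4 would be 65536), wasting
31072 bytes to internal fragmentation - which, per the comment on
m_SumFreeSize, is still counted as free space.
*/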
5673 
5674 /*
5675 Represents a single block of device memory (`VkDeviceMemory`) with all the
5676 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5677 
5678 Thread-safety: This class must be externally synchronized.
5679 */
5680 class VmaDeviceMemoryBlock
5681 {
5682  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5683 public:
5684  VmaBlockMetadata* m_pMetadata;
5685 
5686  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5687 
5688  ~VmaDeviceMemoryBlock()
5689  {
5690  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5691  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5692  }
5693 
5694  // Always call after construction.
5695  void Init(
5696  VmaAllocator hAllocator,
5697  uint32_t newMemoryTypeIndex,
5698  VkDeviceMemory newMemory,
5699  VkDeviceSize newSize,
5700  uint32_t id,
5701  uint32_t algorithm);
5702  // Always call before destruction.
5703  void Destroy(VmaAllocator allocator);
5704 
5705  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5706  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5707  uint32_t GetId() const { return m_Id; }
5708  void* GetMappedData() const { return m_pMappedData; }
5709 
5710  // Validates all data structures inside this object. If not valid, returns false.
5711  bool Validate() const;
5712 
5713  VkResult CheckCorruption(VmaAllocator hAllocator);
5714 
5715  // ppData can be null.
5716  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5717  void Unmap(VmaAllocator hAllocator, uint32_t count);
5718 
5719  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5720  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5721 
5722  VkResult BindBufferMemory(
5723  const VmaAllocator hAllocator,
5724  const VmaAllocation hAllocation,
5725  VkBuffer hBuffer);
5726  VkResult BindImageMemory(
5727  const VmaAllocator hAllocator,
5728  const VmaAllocation hAllocation,
5729  VkImage hImage);
5730 
5731 private:
5732  uint32_t m_MemoryTypeIndex;
5733  uint32_t m_Id;
5734  VkDeviceMemory m_hMemory;
5735 
5736  /*
5737  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5738  Also protects m_MapCount, m_pMappedData.
5739  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5740  */
5741  VMA_MUTEX m_Mutex;
5742  uint32_t m_MapCount;
5743  void* m_pMappedData;
5744 };
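/*
Illustration - not part of the library source: Map()/Unmap() above are
reference-counted per block (m_MapCount), so nested mapping avoids redundant
Vulkan calls. A sketch of the expected pattern:

    void* pData = VMA_NULL;
    block.Map(hAllocator, 1, &pData);   // 0 -> 1: vkMapMemory is called
    block.Map(hAllocator, 1, &pData);   // 1 -> 2: no Vulkan call, same pointer
    block.Unmap(hAllocator, 1);         // 2 -> 1: still mapped
    block.Unmap(hAllocator, 1);         // 1 -> 0: vkUnmapMemory is called
*/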
5745 
5746 struct VmaPointerLess
5747 {
5748  bool operator()(const void* lhs, const void* rhs) const
5749  {
5750  return lhs < rhs;
5751  }
5752 };
5753 
5754 struct VmaDefragmentationMove
5755 {
5756  size_t srcBlockIndex;
5757  size_t dstBlockIndex;
5758  VkDeviceSize srcOffset;
5759  VkDeviceSize dstOffset;
5760  VkDeviceSize size;
5761 };
5762 
5763 class VmaDefragmentationAlgorithm;
5764 
5765 /*
5766 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5767 Vulkan memory type.
5768 
5769 Synchronized internally with a mutex.
5770 */
5771 struct VmaBlockVector
5772 {
5773  VMA_CLASS_NO_COPY(VmaBlockVector)
5774 public:
5775  VmaBlockVector(
5776  VmaAllocator hAllocator,
5777  uint32_t memoryTypeIndex,
5778  VkDeviceSize preferredBlockSize,
5779  size_t minBlockCount,
5780  size_t maxBlockCount,
5781  VkDeviceSize bufferImageGranularity,
5782  uint32_t frameInUseCount,
5783  bool isCustomPool,
5784  bool explicitBlockSize,
5785  uint32_t algorithm);
5786  ~VmaBlockVector();
5787 
5788  VkResult CreateMinBlocks();
5789 
5790  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5791  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5792  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5793  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5794  uint32_t GetAlgorithm() const { return m_Algorithm; }
5795 
5796  void GetPoolStats(VmaPoolStats* pStats);
5797 
5798  bool IsEmpty() const { return m_Blocks.empty(); }
5799  bool IsCorruptionDetectionEnabled() const;
5800 
5801  VkResult Allocate(
5802  VmaPool hCurrentPool,
5803  uint32_t currentFrameIndex,
5804  VkDeviceSize size,
5805  VkDeviceSize alignment,
5806  const VmaAllocationCreateInfo& createInfo,
5807  VmaSuballocationType suballocType,
5808  size_t allocationCount,
5809  VmaAllocation* pAllocations);
5810 
5811  void Free(
5812  VmaAllocation hAllocation);
5813 
5814  // Adds statistics of this BlockVector to pStats.
5815  void AddStats(VmaStats* pStats);
5816 
5817 #if VMA_STATS_STRING_ENABLED
5818  void PrintDetailedMap(class VmaJsonWriter& json);
5819 #endif
5820 
5821  void MakePoolAllocationsLost(
5822  uint32_t currentFrameIndex,
5823  size_t* pLostAllocationCount);
5824  VkResult CheckCorruption();
5825 
5826  // Saves results in pCtx->res.
5827  void Defragment(
5828  class VmaBlockVectorDefragmentationContext* pCtx,
5829  VmaDefragmentationStats* pStats,
5830  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5831  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5832  VkCommandBuffer commandBuffer);
5833  void DefragmentationEnd(
5834  class VmaBlockVectorDefragmentationContext* pCtx,
5835  VmaDefragmentationStats* pStats);
5836 
5837  ////////////////////////////////////////////////////////////////////////////////
5838  // To be used only while the m_Mutex is locked. Used during defragmentation.
5839 
5840  size_t GetBlockCount() const { return m_Blocks.size(); }
5841  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5842  size_t CalcAllocationCount() const;
5843  bool IsBufferImageGranularityConflictPossible() const;
5844 
5845 private:
5846  friend class VmaDefragmentationAlgorithm_Generic;
5847 
5848  const VmaAllocator m_hAllocator;
5849  const uint32_t m_MemoryTypeIndex;
5850  const VkDeviceSize m_PreferredBlockSize;
5851  const size_t m_MinBlockCount;
5852  const size_t m_MaxBlockCount;
5853  const VkDeviceSize m_BufferImageGranularity;
5854  const uint32_t m_FrameInUseCount;
5855  const bool m_IsCustomPool;
5856  const bool m_ExplicitBlockSize;
5857  const uint32_t m_Algorithm;
5858  /* There can be at most one memory block that is completely empty - a
5859  hysteresis to avoid the pessimistic case of alternately creating and destroying
5860  a VkDeviceMemory. */
5861  bool m_HasEmptyBlock;
5862  VMA_RW_MUTEX m_Mutex;
5863  // Incrementally sorted by sumFreeSize, ascending.
5864  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5865  uint32_t m_NextBlockId;
5866 
5867  VkDeviceSize CalcMaxBlockSize() const;
5868 
5869  // Finds and removes given block from vector.
5870  void Remove(VmaDeviceMemoryBlock* pBlock);
5871 
5872  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5873  // after this call.
5874  void IncrementallySortBlocks();
5875 
5876  VkResult AllocatePage(
5877  VmaPool hCurrentPool,
5878  uint32_t currentFrameIndex,
5879  VkDeviceSize size,
5880  VkDeviceSize alignment,
5881  const VmaAllocationCreateInfo& createInfo,
5882  VmaSuballocationType suballocType,
5883  VmaAllocation* pAllocation);
5884 
5885  // To be used only without CAN_MAKE_OTHER_LOST flag.
5886  VkResult AllocateFromBlock(
5887  VmaDeviceMemoryBlock* pBlock,
5888  VmaPool hCurrentPool,
5889  uint32_t currentFrameIndex,
5890  VkDeviceSize size,
5891  VkDeviceSize alignment,
5892  VmaAllocationCreateFlags allocFlags,
5893  void* pUserData,
5894  VmaSuballocationType suballocType,
5895  uint32_t strategy,
5896  VmaAllocation* pAllocation);
5897 
5898  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5899 
5900  // Saves result to pCtx->res.
5901  void ApplyDefragmentationMovesCpu(
5902  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5903  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5904  // Saves result to pCtx->res.
5905  void ApplyDefragmentationMovesGpu(
5906  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5907  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5908  VkCommandBuffer commandBuffer);
5909 
5910  /*
5911  Used during defragmentation. pDefragmentationStats is optional. It's in/out
5912  - updated with new data.
5913  */
5914  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5915 };
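/*
Sketch - an assumption about IncrementallySortBlocks() above, not the verified
implementation: one cheap way to keep m_Blocks "incrementally sorted by
sumFreeSize, ascending" is a single bubble-sort step per call, i.e. at most
one adjacent swap:

    for(size_t i = 1; i < m_Blocks.size(); ++i)
        if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
           m_Blocks[i]->m_pMetadata->GetSumFreeSize())
        {
            VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
            return; // Single step only; full order emerges over many calls.
        }
*/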
5916 
5917 struct VmaPool_T
5918 {
5919  VMA_CLASS_NO_COPY(VmaPool_T)
5920 public:
5921  VmaBlockVector m_BlockVector;
5922 
5923  VmaPool_T(
5924  VmaAllocator hAllocator,
5925  const VmaPoolCreateInfo& createInfo,
5926  VkDeviceSize preferredBlockSize);
5927  ~VmaPool_T();
5928 
5929  uint32_t GetId() const { return m_Id; }
5930  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5931 
5932 #if VMA_STATS_STRING_ENABLED
5933  //void PrintDetailedMap(class VmaStringBuilder& sb);
5934 #endif
5935 
5936 private:
5937  uint32_t m_Id;
5938 };
5939 
5940 /*
5941 Performs defragmentation:
5942 
5943 - Updates `pBlockVector->m_pMetadata`.
5944 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5945 - Does not move actual data, only returns requested moves as `moves`.
5946 */
5947 class VmaDefragmentationAlgorithm
5948 {
5949  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5950 public:
5951  VmaDefragmentationAlgorithm(
5952  VmaAllocator hAllocator,
5953  VmaBlockVector* pBlockVector,
5954  uint32_t currentFrameIndex) :
5955  m_hAllocator(hAllocator),
5956  m_pBlockVector(pBlockVector),
5957  m_CurrentFrameIndex(currentFrameIndex)
5958  {
5959  }
5960  virtual ~VmaDefragmentationAlgorithm()
5961  {
5962  }
5963 
5964  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5965  virtual void AddAll() = 0;
5966 
5967  virtual VkResult Defragment(
5968  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5969  VkDeviceSize maxBytesToMove,
5970  uint32_t maxAllocationsToMove) = 0;
5971 
5972  virtual VkDeviceSize GetBytesMoved() const = 0;
5973  virtual uint32_t GetAllocationsMoved() const = 0;
5974 
5975 protected:
5976  VmaAllocator const m_hAllocator;
5977  VmaBlockVector* const m_pBlockVector;
5978  const uint32_t m_CurrentFrameIndex;
5979 
5980  struct AllocationInfo
5981  {
5982  VmaAllocation m_hAllocation;
5983  VkBool32* m_pChanged;
5984 
5985  AllocationInfo() :
5986  m_hAllocation(VK_NULL_HANDLE),
5987  m_pChanged(VMA_NULL)
5988  {
5989  }
5990  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
5991  m_hAllocation(hAlloc),
5992  m_pChanged(pChanged)
5993  {
5994  }
5995  };
5996 };
5997 
5998 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
5999 {
6000  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6001 public:
6002  VmaDefragmentationAlgorithm_Generic(
6003  VmaAllocator hAllocator,
6004  VmaBlockVector* pBlockVector,
6005  uint32_t currentFrameIndex,
6006  bool overlappingMoveSupported);
6007  virtual ~VmaDefragmentationAlgorithm_Generic();
6008 
6009  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6010  virtual void AddAll() { m_AllAllocations = true; }
6011 
6012  virtual VkResult Defragment(
6013  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6014  VkDeviceSize maxBytesToMove,
6015  uint32_t maxAllocationsToMove);
6016 
6017  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6018  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6019 
6020 private:
6021  uint32_t m_AllocationCount;
6022  bool m_AllAllocations;
6023 
6024  VkDeviceSize m_BytesMoved;
6025  uint32_t m_AllocationsMoved;
6026 
6027  struct AllocationInfoSizeGreater
6028  {
6029  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6030  {
6031  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6032  }
6033  };
6034 
6035  struct AllocationInfoOffsetGreater
6036  {
6037  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6038  {
6039  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6040  }
6041  };
6042 
6043  struct BlockInfo
6044  {
6045  size_t m_OriginalBlockIndex;
6046  VmaDeviceMemoryBlock* m_pBlock;
6047  bool m_HasNonMovableAllocations;
6048  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6049 
6050  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6051  m_OriginalBlockIndex(SIZE_MAX),
6052  m_pBlock(VMA_NULL),
6053  m_HasNonMovableAllocations(true),
6054  m_Allocations(pAllocationCallbacks)
6055  {
6056  }
6057 
6058  void CalcHasNonMovableAllocations()
6059  {
6060  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6061  const size_t defragmentAllocCount = m_Allocations.size();
6062  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6063  }
6064 
6065  void SortAllocationsBySizeDescending()
6066  {
6067  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6068  }
6069 
6070  void SortAllocationsByOffsetDescending()
6071  {
6072  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6073  }
6074  };
6075 
6076  struct BlockPointerLess
6077  {
6078  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6079  {
6080  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6081  }
6082  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6083  {
6084  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6085  }
6086  };
6087 
6088  // 1. Blocks with some non-movable allocations go first.
6089  // 2. Blocks with smaller sumFreeSize go first.
6090  struct BlockInfoCompareMoveDestination
6091  {
6092  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6093  {
6094  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6095  {
6096  return true;
6097  }
6098  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6099  {
6100  return false;
6101  }
6102  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6103  {
6104  return true;
6105  }
6106  return false;
6107  }
6108  };
6109 
6110  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6111  BlockInfoVector m_Blocks;
6112 
6113  VkResult DefragmentRound(
6114  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6115  VkDeviceSize maxBytesToMove,
6116  uint32_t maxAllocationsToMove);
6117 
6118  size_t CalcBlocksWithNonMovableCount() const;
6119 
6120  static bool MoveMakesSense(
6121  size_t dstBlockIndex, VkDeviceSize dstOffset,
6122  size_t srcBlockIndex, VkDeviceSize srcOffset);
6123 };
6124 
6125 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6126 {
6127  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6128 public:
6129  VmaDefragmentationAlgorithm_Fast(
6130  VmaAllocator hAllocator,
6131  VmaBlockVector* pBlockVector,
6132  uint32_t currentFrameIndex,
6133  bool overlappingMoveSupported);
6134  virtual ~VmaDefragmentationAlgorithm_Fast();
6135 
6136  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6137  virtual void AddAll() { m_AllAllocations = true; }
6138 
6139  virtual VkResult Defragment(
6140  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6141  VkDeviceSize maxBytesToMove,
6142  uint32_t maxAllocationsToMove);
6143 
6144  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6145  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6146 
6147 private:
6148  struct BlockInfo
6149  {
6150  size_t origBlockIndex;
6151  };
6152 
6153  class FreeSpaceDatabase
6154  {
6155  public:
6156  FreeSpaceDatabase()
6157  {
6158  FreeSpace s = {};
6159  s.blockInfoIndex = SIZE_MAX;
6160  for(size_t i = 0; i < MAX_COUNT; ++i)
6161  {
6162  m_FreeSpaces[i] = s;
6163  }
6164  }
6165 
6166  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6167  {
6168  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6169  {
6170  return;
6171  }
6172 
6173  // Find the first empty slot, or else the smallest entry that is smaller than the new one.
6174  size_t bestIndex = SIZE_MAX;
6175  for(size_t i = 0; i < MAX_COUNT; ++i)
6176  {
6177  // Empty structure.
6178  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6179  {
6180  bestIndex = i;
6181  break;
6182  }
6183  if(m_FreeSpaces[i].size < size &&
6184  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6185  {
6186  bestIndex = i;
6187  }
6188  }
6189 
6190  if(bestIndex != SIZE_MAX)
6191  {
6192  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6193  m_FreeSpaces[bestIndex].offset = offset;
6194  m_FreeSpaces[bestIndex].size = size;
6195  }
6196  }
6197 
6198  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6199  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6200  {
6201  size_t bestIndex = SIZE_MAX;
6202  VkDeviceSize bestFreeSpaceAfter = 0;
6203  for(size_t i = 0; i < MAX_COUNT; ++i)
6204  {
6205  // Structure is valid.
6206  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6207  {
6208  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6209  // Allocation fits into this structure.
6210  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6211  {
6212  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6213  (dstOffset + size);
6214  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6215  {
6216  bestIndex = i;
6217  bestFreeSpaceAfter = freeSpaceAfter;
6218  }
6219  }
6220  }
6221  }
6222 
6223  if(bestIndex != SIZE_MAX)
6224  {
6225  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6226  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6227 
6228  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6229  {
6230  // Leave this structure for remaining empty space.
6231  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6232  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6233  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6234  }
6235  else
6236  {
6237  // This structure becomes invalid.
6238  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6239  }
6240 
6241  return true;
6242  }
6243 
6244  return false;
6245  }
6246 
6247  private:
6248  static const size_t MAX_COUNT = 4;
6249 
6250  struct FreeSpace
6251  {
6252  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6253  VkDeviceSize offset;
6254  VkDeviceSize size;
6255  } m_FreeSpaces[MAX_COUNT];
6256  };
6257 
6258  const bool m_OverlappingMoveSupported;
6259 
6260  uint32_t m_AllocationCount;
6261  bool m_AllAllocations;
6262 
6263  VkDeviceSize m_BytesMoved;
6264  uint32_t m_AllocationsMoved;
6265 
6266  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6267 
6268  void PreprocessMetadata();
6269  void PostprocessMetadata();
6270  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6271 };
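/*
Worked example of FreeSpaceDatabase above (illustration, not library source):
after Register(blockInfoIndex = 0, offset = 4096, size = 65536), a call to
Fetch(alignment = 256, size = 4096, ...) succeeds with outDstOffset = 4096
(the offset is already aligned). The remaining 61440 bytes exceed
VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, so the entry shrinks to
offset = 8192, size = 61440 instead of being invalidated.
*/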
6272 
6273 struct VmaBlockDefragmentationContext
6274 {
6275 private:
6276  VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
6277 public:
6278  enum BLOCK_FLAG
6279  {
6280  BLOCK_FLAG_USED = 0x00000001,
6281  };
6282  uint32_t flags;
6283  VkBuffer hBuffer;
6284 
6285  VmaBlockDefragmentationContext() :
6286  flags(0),
6287  hBuffer(VK_NULL_HANDLE)
6288  {
6289  }
6290 };
6291 
6292 class VmaBlockVectorDefragmentationContext
6293 {
6294  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6295 public:
6296  VkResult res;
6297  bool mutexLocked;
6298  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6299 
6300  VmaBlockVectorDefragmentationContext(
6301  VmaAllocator hAllocator,
6302  VmaPool hCustomPool, // Optional.
6303  VmaBlockVector* pBlockVector,
6304  uint32_t currFrameIndex,
6305  uint32_t flags);
6306  ~VmaBlockVectorDefragmentationContext();
6307 
6308  VmaPool GetCustomPool() const { return m_hCustomPool; }
6309  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6310  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6311 
6312  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6313  void AddAll() { m_AllAllocations = true; }
6314 
6315  void Begin(bool overlappingMoveSupported);
6316 
6317 private:
6318  const VmaAllocator m_hAllocator;
6319  // Null if not from custom pool.
6320  const VmaPool m_hCustomPool;
6321  // Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6322  VmaBlockVector* const m_pBlockVector;
6323  const uint32_t m_CurrFrameIndex;
6324  const uint32_t m_AlgorithmFlags;
6325  // Owner of this object.
6326  VmaDefragmentationAlgorithm* m_pAlgorithm;
6327 
6328  struct AllocInfo
6329  {
6330  VmaAllocation hAlloc;
6331  VkBool32* pChanged;
6332  };
6333  // Used between constructor and Begin.
6334  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6335  bool m_AllAllocations;
6336 };
6337 
6338 struct VmaDefragmentationContext_T
6339 {
6340 private:
6341  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6342 public:
6343  VmaDefragmentationContext_T(
6344  VmaAllocator hAllocator,
6345  uint32_t currFrameIndex,
6346  uint32_t flags,
6347  VmaDefragmentationStats* pStats);
6348  ~VmaDefragmentationContext_T();
6349 
6350  void AddPools(uint32_t poolCount, VmaPool* pPools);
6351  void AddAllocations(
6352  uint32_t allocationCount,
6353  VmaAllocation* pAllocations,
6354  VkBool32* pAllocationsChanged);
6355 
6356  /*
6357  Returns:
6358  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6359  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6360  - Negative value if an error occurred and the object can be destroyed immediately.
6361  */
6362  VkResult Defragment(
6363  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6364  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6365  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6366 
6367 private:
6368  const VmaAllocator m_hAllocator;
6369  const uint32_t m_CurrFrameIndex;
6370  const uint32_t m_Flags;
6371  VmaDefragmentationStats* const m_pStats;
6372  // Owner of these objects.
6373  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6374  // Owner of these objects.
6375  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6376 };
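/*
Sketch of the public call sequence this context backs (see the Defragment()
return-value contract above; based on the vmaDefragmentationBegin/
vmaDefragmentationEnd entry points this header declares - illustration only):

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
    if(res == VK_NOT_READY)
    {
        // Submit the command buffer passed in `info`, wait for it to finish,
        // then:
        vmaDefragmentationEnd(allocator, ctx);
    }
    // VK_SUCCESS or a negative result: the context can be destroyed at once.
*/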
6377 
6378 #if VMA_RECORDING_ENABLED
6379 
6380 class VmaRecorder
6381 {
6382 public:
6383  VmaRecorder();
6384  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6385  void WriteConfiguration(
6386  const VkPhysicalDeviceProperties& devProps,
6387  const VkPhysicalDeviceMemoryProperties& memProps,
6388  bool dedicatedAllocationExtensionEnabled);
6389  ~VmaRecorder();
6390 
6391  void RecordCreateAllocator(uint32_t frameIndex);
6392  void RecordDestroyAllocator(uint32_t frameIndex);
6393  void RecordCreatePool(uint32_t frameIndex,
6394  const VmaPoolCreateInfo& createInfo,
6395  VmaPool pool);
6396  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6397  void RecordAllocateMemory(uint32_t frameIndex,
6398  const VkMemoryRequirements& vkMemReq,
6399  const VmaAllocationCreateInfo& createInfo,
6400  VmaAllocation allocation);
6401  void RecordAllocateMemoryPages(uint32_t frameIndex,
6402  const VkMemoryRequirements& vkMemReq,
6403  const VmaAllocationCreateInfo& createInfo,
6404  uint64_t allocationCount,
6405  const VmaAllocation* pAllocations);
6406  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6407  const VkMemoryRequirements& vkMemReq,
6408  bool requiresDedicatedAllocation,
6409  bool prefersDedicatedAllocation,
6410  const VmaAllocationCreateInfo& createInfo,
6411  VmaAllocation allocation);
6412  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6413  const VkMemoryRequirements& vkMemReq,
6414  bool requiresDedicatedAllocation,
6415  bool prefersDedicatedAllocation,
6416  const VmaAllocationCreateInfo& createInfo,
6417  VmaAllocation allocation);
6418  void RecordFreeMemory(uint32_t frameIndex,
6419  VmaAllocation allocation);
6420  void RecordFreeMemoryPages(uint32_t frameIndex,
6421  uint64_t allocationCount,
6422  const VmaAllocation* pAllocations);
6423  void RecordResizeAllocation(
6424  uint32_t frameIndex,
6425  VmaAllocation allocation,
6426  VkDeviceSize newSize);
6427  void RecordSetAllocationUserData(uint32_t frameIndex,
6428  VmaAllocation allocation,
6429  const void* pUserData);
6430  void RecordCreateLostAllocation(uint32_t frameIndex,
6431  VmaAllocation allocation);
6432  void RecordMapMemory(uint32_t frameIndex,
6433  VmaAllocation allocation);
6434  void RecordUnmapMemory(uint32_t frameIndex,
6435  VmaAllocation allocation);
6436  void RecordFlushAllocation(uint32_t frameIndex,
6437  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6438  void RecordInvalidateAllocation(uint32_t frameIndex,
6439  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6440  void RecordCreateBuffer(uint32_t frameIndex,
6441  const VkBufferCreateInfo& bufCreateInfo,
6442  const VmaAllocationCreateInfo& allocCreateInfo,
6443  VmaAllocation allocation);
6444  void RecordCreateImage(uint32_t frameIndex,
6445  const VkImageCreateInfo& imageCreateInfo,
6446  const VmaAllocationCreateInfo& allocCreateInfo,
6447  VmaAllocation allocation);
6448  void RecordDestroyBuffer(uint32_t frameIndex,
6449  VmaAllocation allocation);
6450  void RecordDestroyImage(uint32_t frameIndex,
6451  VmaAllocation allocation);
6452  void RecordTouchAllocation(uint32_t frameIndex,
6453  VmaAllocation allocation);
6454  void RecordGetAllocationInfo(uint32_t frameIndex,
6455  VmaAllocation allocation);
6456  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6457  VmaPool pool);
6458  void RecordDefragmentationBegin(uint32_t frameIndex,
6459  const VmaDefragmentationInfo2& info,
6460  VmaDefragmentationContext ctx);
6461  void RecordDefragmentationEnd(uint32_t frameIndex,
6462  VmaDefragmentationContext ctx);
6463 
6464 private:
6465  struct CallParams
6466  {
6467  uint32_t threadId;
6468  double time;
6469  };
6470 
6471  class UserDataString
6472  {
6473  public:
6474  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6475  const char* GetString() const { return m_Str; }
6476 
6477  private:
6478  char m_PtrStr[17];
6479  const char* m_Str;
6480  };
6481 
6482  bool m_UseMutex;
6483  VmaRecordFlags m_Flags;
6484  FILE* m_File;
6485  VMA_MUTEX m_FileMutex;
6486  int64_t m_Freq;
6487  int64_t m_StartCounter;
6488 
6489  void GetBasicParams(CallParams& outParams);
6490 
6491  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6492  template<typename T>
6493  void PrintPointerList(uint64_t count, const T* pItems)
6494  {
6495  if(count)
6496  {
6497  fprintf(m_File, "%p", pItems[0]);
6498  for(uint64_t i = 1; i < count; ++i)
6499  {
6500  fprintf(m_File, " %p", pItems[i]);
6501  }
6502  }
6503  }
6504 
6505  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6506  void Flush();
6507 };
6508 
6509 #endif // #if VMA_RECORDING_ENABLED
6510 
6511 // Main allocator object.
6512 struct VmaAllocator_T
6513 {
6514  VMA_CLASS_NO_COPY(VmaAllocator_T)
6515 public:
6516  bool m_UseMutex;
6517  bool m_UseKhrDedicatedAllocation;
6518  VkDevice m_hDevice;
6519  bool m_AllocationCallbacksSpecified;
6520  VkAllocationCallbacks m_AllocationCallbacks;
6521  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6522 
6523  // Number of bytes still available within the heap size limit, or VK_WHOLE_SIZE if no limit is set for that heap.
6524  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6525  VMA_MUTEX m_HeapSizeLimitMutex;
6526 
6527  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6528  VkPhysicalDeviceMemoryProperties m_MemProps;
6529 
6530  // Default pools.
6531  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6532 
6533  // Each vector is sorted by memory (handle value).
6534  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6535  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6536  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6537 
6538  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6539  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6540  ~VmaAllocator_T();
6541 
6542  const VkAllocationCallbacks* GetAllocationCallbacks() const
6543  {
6544  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6545  }
6546  const VmaVulkanFunctions& GetVulkanFunctions() const
6547  {
6548  return m_VulkanFunctions;
6549  }
6550 
6551  VkDeviceSize GetBufferImageGranularity() const
6552  {
6553  return VMA_MAX(
6554  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6555  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6556  }
6557 
6558  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6559  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6560 
6561  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6562  {
6563  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6564  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6565  }
6566  // True when the specified memory type is HOST_VISIBLE but not HOST_COHERENT.
6567  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6568  {
6569  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6570  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6571  }
6572  // Minimum alignment for all allocations in a specific memory type.
6573  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6574  {
6575  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6576  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6577  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6578  }
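 /*
 Worked example for the two helpers above (a sketch with assumed values):
 suppose memory type 2 is HOST_VISIBLE but not HOST_COHERENT and the device
 reports nonCoherentAtomSize = 64. Then:

     IsMemoryTypeNonCoherent(2);   // true: (flags & (VISIBLE|COHERENT)) == VISIBLE
     GetMemoryTypeMinAlignment(2); // max(VMA_DEBUG_ALIGNMENT, 64) = 64

 Aligning allocations in non-coherent types to nonCoherentAtomSize keeps the
 ranges passed to vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges valid.
 */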
6579 
6580  bool IsIntegratedGpu() const
6581  {
6582  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6583  }
6584 
6585 #if VMA_RECORDING_ENABLED
6586  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6587 #endif
6588 
6589  void GetBufferMemoryRequirements(
6590  VkBuffer hBuffer,
6591  VkMemoryRequirements& memReq,
6592  bool& requiresDedicatedAllocation,
6593  bool& prefersDedicatedAllocation) const;
6594  void GetImageMemoryRequirements(
6595  VkImage hImage,
6596  VkMemoryRequirements& memReq,
6597  bool& requiresDedicatedAllocation,
6598  bool& prefersDedicatedAllocation) const;
6599 
6600  // Main allocation function.
6601  VkResult AllocateMemory(
6602  const VkMemoryRequirements& vkMemReq,
6603  bool requiresDedicatedAllocation,
6604  bool prefersDedicatedAllocation,
6605  VkBuffer dedicatedBuffer,
6606  VkImage dedicatedImage,
6607  const VmaAllocationCreateInfo& createInfo,
6608  VmaSuballocationType suballocType,
6609  size_t allocationCount,
6610  VmaAllocation* pAllocations);
6611 
6612  // Main deallocation function.
6613  void FreeMemory(
6614  size_t allocationCount,
6615  const VmaAllocation* pAllocations);
6616 
6617  VkResult ResizeAllocation(
6618  const VmaAllocation alloc,
6619  VkDeviceSize newSize);
6620 
6621  void CalculateStats(VmaStats* pStats);
6622 
6623 #if VMA_STATS_STRING_ENABLED
6624  void PrintDetailedMap(class VmaJsonWriter& json);
6625 #endif
6626 
6627  VkResult DefragmentationBegin(
6628  const VmaDefragmentationInfo2& info,
6629  VmaDefragmentationStats* pStats,
6630  VmaDefragmentationContext* pContext);
6631  VkResult DefragmentationEnd(
6632  VmaDefragmentationContext context);
6633 
6634  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6635  bool TouchAllocation(VmaAllocation hAllocation);
6636 
6637  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6638  void DestroyPool(VmaPool pool);
6639  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6640 
6641  void SetCurrentFrameIndex(uint32_t frameIndex);
6642  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6643 
6644  void MakePoolAllocationsLost(
6645  VmaPool hPool,
6646  size_t* pLostAllocationCount);
6647  VkResult CheckPoolCorruption(VmaPool hPool);
6648  VkResult CheckCorruption(uint32_t memoryTypeBits);
6649 
6650  void CreateLostAllocation(VmaAllocation* pAllocation);
6651 
6652  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6653  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6654 
6655  VkResult Map(VmaAllocation hAllocation, void** ppData);
6656  void Unmap(VmaAllocation hAllocation);
6657 
6658  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6659  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6660 
6661  void FlushOrInvalidateAllocation(
6662  VmaAllocation hAllocation,
6663  VkDeviceSize offset, VkDeviceSize size,
6664  VMA_CACHE_OPERATION op);
6665 
6666  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6667 
6668 private:
6669  VkDeviceSize m_PreferredLargeHeapBlockSize;
6670 
6671  VkPhysicalDevice m_PhysicalDevice;
6672  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6673 
6674  VMA_RW_MUTEX m_PoolsMutex;
6675  // Protected by m_PoolsMutex. Sorted by pointer value.
6676  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6677  uint32_t m_NextPoolId;
6678 
6679  VmaVulkanFunctions m_VulkanFunctions;
6680 
6681 #if VMA_RECORDING_ENABLED
6682  VmaRecorder* m_pRecorder;
6683 #endif
6684 
6685  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6686 
6687  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6688 
6689  VkResult AllocateMemoryOfType(
6690  VkDeviceSize size,
6691  VkDeviceSize alignment,
6692  bool dedicatedAllocation,
6693  VkBuffer dedicatedBuffer,
6694  VkImage dedicatedImage,
6695  const VmaAllocationCreateInfo& createInfo,
6696  uint32_t memTypeIndex,
6697  VmaSuballocationType suballocType,
6698  size_t allocationCount,
6699  VmaAllocation* pAllocations);
6700 
6701  // Helper function only to be used inside AllocateDedicatedMemory.
6702  VkResult AllocateDedicatedMemoryPage(
6703  VkDeviceSize size,
6704  VmaSuballocationType suballocType,
6705  uint32_t memTypeIndex,
6706  const VkMemoryAllocateInfo& allocInfo,
6707  bool map,
6708  bool isUserDataString,
6709  void* pUserData,
6710  VmaAllocation* pAllocation);
6711 
6712  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6713  VkResult AllocateDedicatedMemory(
6714  VkDeviceSize size,
6715  VmaSuballocationType suballocType,
6716  uint32_t memTypeIndex,
6717  bool map,
6718  bool isUserDataString,
6719  void* pUserData,
6720  VkBuffer dedicatedBuffer,
6721  VkImage dedicatedImage,
6722  size_t allocationCount,
6723  VmaAllocation* pAllocations);
6724 
6725  // Frees and unregisters memory of the given allocation, which must have been made as a dedicated allocation.
6726  void FreeDedicatedMemory(VmaAllocation allocation);
6727 };
6728 
6729 
6730 // Memory allocation #2 after VmaAllocator_T definition
6731 
6732 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6733 {
6734  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6735 }
6736 
6737 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6738 {
6739  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6740 }
6741 
6742 template<typename T>
6743 static T* VmaAllocate(VmaAllocator hAllocator)
6744 {
6745  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6746 }
6747 
6748 template<typename T>
6749 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6750 {
6751  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6752 }
6753 
6754 template<typename T>
6755 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6756 {
6757  if(ptr != VMA_NULL)
6758  {
6759  ptr->~T();
6760  VmaFree(hAllocator, ptr);
6761  }
6762 }
6763 
6764 template<typename T>
6765 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6766 {
6767  if(ptr != VMA_NULL)
6768  {
6769  for(size_t i = count; i--; )
6770  ptr[i].~T();
6771  VmaFree(hAllocator, ptr);
6772  }
6773 }
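 /*
 These deleters pair with the vma_new / vma_new_array helpers used elsewhere in
 this file (see e.g. FreeUserDataString below). A minimal sketch:

     char* const str = vma_new_array(hAllocator, char, len + 1); // allocate + construct
     // ... fill str ...
     vma_delete_array(hAllocator, str, len + 1); // destroy in reverse order, then free
 */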
6774 
6775 
6776 // VmaStringBuilder
6777 
6778 #if VMA_STATS_STRING_ENABLED
6779 
6780 class VmaStringBuilder
6781 {
6782 public:
6783  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6784  size_t GetLength() const { return m_Data.size(); }
6785  const char* GetData() const { return m_Data.data(); }
6786 
6787  void Add(char ch) { m_Data.push_back(ch); }
6788  void Add(const char* pStr);
6789  void AddNewLine() { Add('\n'); }
6790  void AddNumber(uint32_t num);
6791  void AddNumber(uint64_t num);
6792  void AddPointer(const void* ptr);
6793 
6794 private:
6795  VmaVector< char, VmaStlAllocator<char> > m_Data;
6796 };
6797 
6798 void VmaStringBuilder::Add(const char* pStr)
6799 {
6800  const size_t strLen = strlen(pStr);
6801  if(strLen > 0)
6802  {
6803  const size_t oldCount = m_Data.size();
6804  m_Data.resize(oldCount + strLen);
6805  memcpy(m_Data.data() + oldCount, pStr, strLen);
6806  }
6807 }
6808 
6809 void VmaStringBuilder::AddNumber(uint32_t num)
6810 {
6811  char buf[11];
6812  VmaUint32ToStr(buf, sizeof(buf), num);
6813  Add(buf);
6814 }
6815 
6816 void VmaStringBuilder::AddNumber(uint64_t num)
6817 {
6818  char buf[21];
6819  VmaUint64ToStr(buf, sizeof(buf), num);
6820  Add(buf);
6821 }
6822 
6823 void VmaStringBuilder::AddPointer(const void* ptr)
6824 {
6825  char buf[21];
6826  VmaPtrToStr(buf, sizeof(buf), ptr);
6827  Add(buf);
6828 }
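 /*
 Minimal usage sketch for VmaStringBuilder (illustrative, not from the original;
 `hAllocator` is assumed to be a valid VmaAllocator):

     VmaStringBuilder sb(hAllocator);
     sb.Add("Allocations: ");
     sb.AddNumber((uint32_t)42);
     sb.AddNewLine();
     // Note: GetData() is not null-terminated; always pair it with GetLength().
 */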
6829 
6830 #endif // #if VMA_STATS_STRING_ENABLED
6831 
6832 
6833 // VmaJsonWriter
6834 
6835 #if VMA_STATS_STRING_ENABLED
6836 
6837 class VmaJsonWriter
6838 {
6839  VMA_CLASS_NO_COPY(VmaJsonWriter)
6840 public:
6841  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6842  ~VmaJsonWriter();
6843 
6844  void BeginObject(bool singleLine = false);
6845  void EndObject();
6846 
6847  void BeginArray(bool singleLine = false);
6848  void EndArray();
6849 
6850  void WriteString(const char* pStr);
6851  void BeginString(const char* pStr = VMA_NULL);
6852  void ContinueString(const char* pStr);
6853  void ContinueString(uint32_t n);
6854  void ContinueString(uint64_t n);
6855  void ContinueString_Pointer(const void* ptr);
6856  void EndString(const char* pStr = VMA_NULL);
6857 
6858  void WriteNumber(uint32_t n);
6859  void WriteNumber(uint64_t n);
6860  void WriteBool(bool b);
6861  void WriteNull();
6862 
6863 private:
6864  static const char* const INDENT;
6865 
6866  enum COLLECTION_TYPE
6867  {
6868  COLLECTION_TYPE_OBJECT,
6869  COLLECTION_TYPE_ARRAY,
6870  };
6871  struct StackItem
6872  {
6873  COLLECTION_TYPE type;
6874  uint32_t valueCount;
6875  bool singleLineMode;
6876  };
6877 
6878  VmaStringBuilder& m_SB;
6879  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6880  bool m_InsideString;
6881 
6882  void BeginValue(bool isString);
6883  void WriteIndent(bool oneLess = false);
6884 };
6885 
6886 const char* const VmaJsonWriter::INDENT = " ";
6887 
6888 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6889  m_SB(sb),
6890  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6891  m_InsideString(false)
6892 {
6893 }
6894 
6895 VmaJsonWriter::~VmaJsonWriter()
6896 {
6897  VMA_ASSERT(!m_InsideString);
6898  VMA_ASSERT(m_Stack.empty());
6899 }
6900 
6901 void VmaJsonWriter::BeginObject(bool singleLine)
6902 {
6903  VMA_ASSERT(!m_InsideString);
6904 
6905  BeginValue(false);
6906  m_SB.Add('{');
6907 
6908  StackItem item;
6909  item.type = COLLECTION_TYPE_OBJECT;
6910  item.valueCount = 0;
6911  item.singleLineMode = singleLine;
6912  m_Stack.push_back(item);
6913 }
6914 
6915 void VmaJsonWriter::EndObject()
6916 {
6917  VMA_ASSERT(!m_InsideString);
6918 
6919  WriteIndent(true);
6920  m_SB.Add('}');
6921 
6922  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6923  m_Stack.pop_back();
6924 }
6925 
6926 void VmaJsonWriter::BeginArray(bool singleLine)
6927 {
6928  VMA_ASSERT(!m_InsideString);
6929 
6930  BeginValue(false);
6931  m_SB.Add('[');
6932 
6933  StackItem item;
6934  item.type = COLLECTION_TYPE_ARRAY;
6935  item.valueCount = 0;
6936  item.singleLineMode = singleLine;
6937  m_Stack.push_back(item);
6938 }
6939 
6940 void VmaJsonWriter::EndArray()
6941 {
6942  VMA_ASSERT(!m_InsideString);
6943 
6944  WriteIndent(true);
6945  m_SB.Add(']');
6946 
6947  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6948  m_Stack.pop_back();
6949 }
6950 
6951 void VmaJsonWriter::WriteString(const char* pStr)
6952 {
6953  BeginString(pStr);
6954  EndString();
6955 }
6956 
6957 void VmaJsonWriter::BeginString(const char* pStr)
6958 {
6959  VMA_ASSERT(!m_InsideString);
6960 
6961  BeginValue(true);
6962  m_SB.Add('"');
6963  m_InsideString = true;
6964  if(pStr != VMA_NULL && pStr[0] != '\0')
6965  {
6966  ContinueString(pStr);
6967  }
6968 }
6969 
6970 void VmaJsonWriter::ContinueString(const char* pStr)
6971 {
6972  VMA_ASSERT(m_InsideString);
6973 
6974  const size_t strLen = strlen(pStr);
6975  for(size_t i = 0; i < strLen; ++i)
6976  {
6977  char ch = pStr[i];
6978  if(ch == '\\')
6979  {
6980  m_SB.Add("\\\\");
6981  }
6982  else if(ch == '"')
6983  {
6984  m_SB.Add("\\\"");
6985  }
6986  else if(ch >= 32)
6987  {
6988  m_SB.Add(ch);
6989  }
6990  else switch(ch)
6991  {
6992  case '\b':
6993  m_SB.Add("\\b");
6994  break;
6995  case '\f':
6996  m_SB.Add("\\f");
6997  break;
6998  case '\n':
6999  m_SB.Add("\\n");
7000  break;
7001  case '\r':
7002  m_SB.Add("\\r");
7003  break;
7004  case '\t':
7005  m_SB.Add("\\t");
7006  break;
7007  default:
7008  VMA_ASSERT(0 && "Character not currently supported.");
7009  break;
7010  }
7011  }
7012 }
7013 
7014 void VmaJsonWriter::ContinueString(uint32_t n)
7015 {
7016  VMA_ASSERT(m_InsideString);
7017  m_SB.AddNumber(n);
7018 }
7019 
7020 void VmaJsonWriter::ContinueString(uint64_t n)
7021 {
7022  VMA_ASSERT(m_InsideString);
7023  m_SB.AddNumber(n);
7024 }
7025 
7026 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7027 {
7028  VMA_ASSERT(m_InsideString);
7029  m_SB.AddPointer(ptr);
7030 }
7031 
7032 void VmaJsonWriter::EndString(const char* pStr)
7033 {
7034  VMA_ASSERT(m_InsideString);
7035  if(pStr != VMA_NULL && pStr[0] != '\0')
7036  {
7037  ContinueString(pStr);
7038  }
7039  m_SB.Add('"');
7040  m_InsideString = false;
7041 }
7042 
7043 void VmaJsonWriter::WriteNumber(uint32_t n)
7044 {
7045  VMA_ASSERT(!m_InsideString);
7046  BeginValue(false);
7047  m_SB.AddNumber(n);
7048 }
7049 
7050 void VmaJsonWriter::WriteNumber(uint64_t n)
7051 {
7052  VMA_ASSERT(!m_InsideString);
7053  BeginValue(false);
7054  m_SB.AddNumber(n);
7055 }
7056 
7057 void VmaJsonWriter::WriteBool(bool b)
7058 {
7059  VMA_ASSERT(!m_InsideString);
7060  BeginValue(false);
7061  m_SB.Add(b ? "true" : "false");
7062 }
7063 
7064 void VmaJsonWriter::WriteNull()
7065 {
7066  VMA_ASSERT(!m_InsideString);
7067  BeginValue(false);
7068  m_SB.Add("null");
7069 }
7070 
7071 void VmaJsonWriter::BeginValue(bool isString)
7072 {
7073  if(!m_Stack.empty())
7074  {
7075  StackItem& currItem = m_Stack.back();
7076  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7077  currItem.valueCount % 2 == 0)
7078  {
7079  VMA_ASSERT(isString);
7080  }
7081 
7082  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7083  currItem.valueCount % 2 != 0)
7084  {
7085  m_SB.Add(": ");
7086  }
7087  else if(currItem.valueCount > 0)
7088  {
7089  m_SB.Add(", ");
7090  WriteIndent();
7091  }
7092  else
7093  {
7094  WriteIndent();
7095  }
7096  ++currItem.valueCount;
7097  }
7098 }
7099 
7100 void VmaJsonWriter::WriteIndent(bool oneLess)
7101 {
7102  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7103  {
7104  m_SB.AddNewLine();
7105 
7106  size_t count = m_Stack.size();
7107  if(count > 0 && oneLess)
7108  {
7109  --count;
7110  }
7111  for(size_t i = 0; i < count; ++i)
7112  {
7113  m_SB.Add(INDENT);
7114  }
7115  }
7116 }
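 /*
 Minimal usage sketch showing the writer's stack discipline (illustrative only;
 `hAllocator` is assumed):

     VmaStringBuilder sb(hAllocator);
     VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
     json.BeginObject();
     json.WriteString("Total");        // keys inside an object must be strings
     json.WriteNumber((uint64_t)1024); // the following value
     json.EndObject();
     // sb now contains { "Total": 1024 } spread over indented lines.
 */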
7117 
7118 #endif // #if VMA_STATS_STRING_ENABLED
7119 
7120 
7121 
7122 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7123 {
7124  if(IsUserDataString())
7125  {
7126  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7127 
7128  FreeUserDataString(hAllocator);
7129 
7130  if(pUserData != VMA_NULL)
7131  {
7132  const char* const newStrSrc = (char*)pUserData;
7133  const size_t newStrLen = strlen(newStrSrc);
7134  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7135  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7136  m_pUserData = newStrDst;
7137  }
7138  }
7139  else
7140  {
7141  m_pUserData = pUserData;
7142  }
7143 }
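 /*
 The string-copying branch above is reached when the allocation was created with
 VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT. Public-API sketch (the literal
 strings and `allocator`/`alloc` are illustrative):

     VmaAllocationCreateInfo allocCreateInfo = {};
     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
     allocCreateInfo.pUserData = (void*)"terrain_heightmap"; // copied, not referenced
     // Replacing the string later frees the old copy first:
     vmaSetAllocationUserData(allocator, alloc, (void*)"terrain_heightmap_v2");
 */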
7144 
7145 void VmaAllocation_T::ChangeBlockAllocation(
7146  VmaAllocator hAllocator,
7147  VmaDeviceMemoryBlock* block,
7148  VkDeviceSize offset)
7149 {
7150  VMA_ASSERT(block != VMA_NULL);
7151  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7152 
7153  // Move mapping reference counter from old block to new block.
7154  if(block != m_BlockAllocation.m_Block)
7155  {
7156  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7157  if(IsPersistentMap())
7158  ++mapRefCount;
7159  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7160  block->Map(hAllocator, mapRefCount, VMA_NULL);
7161  }
7162 
7163  m_BlockAllocation.m_Block = block;
7164  m_BlockAllocation.m_Offset = offset;
7165 }
7166 
7167 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7168 {
7169  VMA_ASSERT(newSize > 0);
7170  m_Size = newSize;
7171 }
7172 
7173 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7174 {
7175  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7176  m_BlockAllocation.m_Offset = newOffset;
7177 }
7178 
7179 VkDeviceSize VmaAllocation_T::GetOffset() const
7180 {
7181  switch(m_Type)
7182  {
7183  case ALLOCATION_TYPE_BLOCK:
7184  return m_BlockAllocation.m_Offset;
7185  case ALLOCATION_TYPE_DEDICATED:
7186  return 0;
7187  default:
7188  VMA_ASSERT(0);
7189  return 0;
7190  }
7191 }
7192 
7193 VkDeviceMemory VmaAllocation_T::GetMemory() const
7194 {
7195  switch(m_Type)
7196  {
7197  case ALLOCATION_TYPE_BLOCK:
7198  return m_BlockAllocation.m_Block->GetDeviceMemory();
7199  case ALLOCATION_TYPE_DEDICATED:
7200  return m_DedicatedAllocation.m_hMemory;
7201  default:
7202  VMA_ASSERT(0);
7203  return VK_NULL_HANDLE;
7204  }
7205 }
7206 
7207 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7208 {
7209  switch(m_Type)
7210  {
7211  case ALLOCATION_TYPE_BLOCK:
7212  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7213  case ALLOCATION_TYPE_DEDICATED:
7214  return m_DedicatedAllocation.m_MemoryTypeIndex;
7215  default:
7216  VMA_ASSERT(0);
7217  return UINT32_MAX;
7218  }
7219 }
7220 
7221 void* VmaAllocation_T::GetMappedData() const
7222 {
7223  switch(m_Type)
7224  {
7225  case ALLOCATION_TYPE_BLOCK:
7226  if(m_MapCount != 0)
7227  {
7228  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7229  VMA_ASSERT(pBlockData != VMA_NULL);
7230  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7231  }
7232  else
7233  {
7234  return VMA_NULL;
7235  }
7236  break;
7237  case ALLOCATION_TYPE_DEDICATED:
7238  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7239  return m_DedicatedAllocation.m_pMappedData;
7240  default:
7241  VMA_ASSERT(0);
7242  return VMA_NULL;
7243  }
7244 }
7245 
7246 bool VmaAllocation_T::CanBecomeLost() const
7247 {
7248  switch(m_Type)
7249  {
7250  case ALLOCATION_TYPE_BLOCK:
7251  return m_BlockAllocation.m_CanBecomeLost;
7252  case ALLOCATION_TYPE_DEDICATED:
7253  return false;
7254  default:
7255  VMA_ASSERT(0);
7256  return false;
7257  }
7258 }
7259 
7260 VmaPool VmaAllocation_T::GetPool() const
7261 {
7262  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7263  return m_BlockAllocation.m_hPool;
7264 }
7265 
7266 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7267 {
7268  VMA_ASSERT(CanBecomeLost());
7269 
7270  /*
7271  Warning: This is a carefully designed algorithm.
7272  Do not modify unless you really know what you're doing :)
7273  */
7274  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7275  for(;;)
7276  {
7277  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7278  {
7279  VMA_ASSERT(0);
7280  return false;
7281  }
7282  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7283  {
7284  return false;
7285  }
7286  else // Last use time earlier than current time.
7287  {
7288  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7289  {
7290  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7291  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7292  return true;
7293  }
7294  }
7295  }
7296 }
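 /*
 The loop above is a lock-free compare-and-swap retry: it keeps re-reading the
 atomic last-use frame index until either the allocation turns out to be already
 lost, it was used too recently to be lost, or this thread wins the exchange and
 transitions it to VMA_FRAME_INDEX_LOST. Equivalent pattern in standard C++
 (a sketch, assuming std::atomic<uint32_t> lastUse):

     uint32_t expected = lastUse.load();
     while(expected != VMA_FRAME_INDEX_LOST &&
           expected + frameInUseCount < currentFrameIndex &&
           !lastUse.compare_exchange_weak(expected, VMA_FRAME_INDEX_LOST))
     {
         // compare_exchange_weak refreshed `expected`; retry.
     }
 */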
7297 
7298 #if VMA_STATS_STRING_ENABLED
7299 
7300 // These names correspond to values of enum VmaSuballocationType.
7301 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7302  "FREE",
7303  "UNKNOWN",
7304  "BUFFER",
7305  "IMAGE_UNKNOWN",
7306  "IMAGE_LINEAR",
7307  "IMAGE_OPTIMAL",
7308 };
7309 
7310 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7311 {
7312  json.WriteString("Type");
7313  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7314 
7315  json.WriteString("Size");
7316  json.WriteNumber(m_Size);
7317 
7318  if(m_pUserData != VMA_NULL)
7319  {
7320  json.WriteString("UserData");
7321  if(IsUserDataString())
7322  {
7323  json.WriteString((const char*)m_pUserData);
7324  }
7325  else
7326  {
7327  json.BeginString();
7328  json.ContinueString_Pointer(m_pUserData);
7329  json.EndString();
7330  }
7331  }
7332 
7333  json.WriteString("CreationFrameIndex");
7334  json.WriteNumber(m_CreationFrameIndex);
7335 
7336  json.WriteString("LastUseFrameIndex");
7337  json.WriteNumber(GetLastUseFrameIndex());
7338 
7339  if(m_BufferImageUsage != 0)
7340  {
7341  json.WriteString("Usage");
7342  json.WriteNumber(m_BufferImageUsage);
7343  }
7344 }
7345 
7346 #endif
7347 
7348 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7349 {
7350  VMA_ASSERT(IsUserDataString());
7351  if(m_pUserData != VMA_NULL)
7352  {
7353  char* const oldStr = (char*)m_pUserData;
7354  const size_t oldStrLen = strlen(oldStr);
7355  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7356  m_pUserData = VMA_NULL;
7357  }
7358 }
7359 
7360 void VmaAllocation_T::BlockAllocMap()
7361 {
7362  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7363 
7364  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7365  {
7366  ++m_MapCount;
7367  }
7368  else
7369  {
7370  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7371  }
7372 }
7373 
7374 void VmaAllocation_T::BlockAllocUnmap()
7375 {
7376  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7377 
7378  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7379  {
7380  --m_MapCount;
7381  }
7382  else
7383  {
7384  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7385  }
7386 }
7387 
7388 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7389 {
7390  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7391 
7392  if(m_MapCount != 0)
7393  {
7394  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7395  {
7396  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7397  *ppData = m_DedicatedAllocation.m_pMappedData;
7398  ++m_MapCount;
7399  return VK_SUCCESS;
7400  }
7401  else
7402  {
7403  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7404  return VK_ERROR_MEMORY_MAP_FAILED;
7405  }
7406  }
7407  else
7408  {
7409  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7410  hAllocator->m_hDevice,
7411  m_DedicatedAllocation.m_hMemory,
7412  0, // offset
7413  VK_WHOLE_SIZE,
7414  0, // flags
7415  ppData);
7416  if(result == VK_SUCCESS)
7417  {
7418  m_DedicatedAllocation.m_pMappedData = *ppData;
7419  m_MapCount = 1;
7420  }
7421  return result;
7422  }
7423 }
7424 
7425 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7426 {
7427  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7428 
7429  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7430  {
7431  --m_MapCount;
7432  if(m_MapCount == 0)
7433  {
7434  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7435  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7436  hAllocator->m_hDevice,
7437  m_DedicatedAllocation.m_hMemory);
7438  }
7439  }
7440  else
7441  {
7442  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7443  }
7444 }
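 /*
 m_MapCount packs a small mapping reference count together with the
 MAP_COUNT_FLAG_PERSISTENT_MAP bit, hence the masking above. At the public API
 level this makes mapping reference-counted, so nesting is legal (sketch):

     void* p1; void* p2;
     vmaMapMemory(allocator, alloc, &p1); // count 0 -> 1, calls vkMapMemory
     vmaMapMemory(allocator, alloc, &p2); // count 1 -> 2, same pointer returned
     vmaUnmapMemory(allocator, alloc);    // count 2 -> 1
     vmaUnmapMemory(allocator, alloc);    // count 1 -> 0, calls vkUnmapMemory
 */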
7445 
7446 #if VMA_STATS_STRING_ENABLED
7447 
7448 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7449 {
7450  json.BeginObject();
7451 
7452  json.WriteString("Blocks");
7453  json.WriteNumber(stat.blockCount);
7454 
7455  json.WriteString("Allocations");
7456  json.WriteNumber(stat.allocationCount);
7457 
7458  json.WriteString("UnusedRanges");
7459  json.WriteNumber(stat.unusedRangeCount);
7460 
7461  json.WriteString("UsedBytes");
7462  json.WriteNumber(stat.usedBytes);
7463 
7464  json.WriteString("UnusedBytes");
7465  json.WriteNumber(stat.unusedBytes);
7466 
7467  if(stat.allocationCount > 1)
7468  {
7469  json.WriteString("AllocationSize");
7470  json.BeginObject(true);
7471  json.WriteString("Min");
7472  json.WriteNumber(stat.allocationSizeMin);
7473  json.WriteString("Avg");
7474  json.WriteNumber(stat.allocationSizeAvg);
7475  json.WriteString("Max");
7476  json.WriteNumber(stat.allocationSizeMax);
7477  json.EndObject();
7478  }
7479 
7480  if(stat.unusedRangeCount > 1)
7481  {
7482  json.WriteString("UnusedRangeSize");
7483  json.BeginObject(true);
7484  json.WriteString("Min");
7485  json.WriteNumber(stat.unusedRangeSizeMin);
7486  json.WriteString("Avg");
7487  json.WriteNumber(stat.unusedRangeSizeAvg);
7488  json.WriteString("Max");
7489  json.WriteNumber(stat.unusedRangeSizeMax);
7490  json.EndObject();
7491  }
7492 
7493  json.EndObject();
7494 }
7495 
7496 #endif // #if VMA_STATS_STRING_ENABLED
7497 
7498 struct VmaSuballocationItemSizeLess
7499 {
7500  bool operator()(
7501  const VmaSuballocationList::iterator lhs,
7502  const VmaSuballocationList::iterator rhs) const
7503  {
7504  return lhs->size < rhs->size;
7505  }
7506  bool operator()(
7507  const VmaSuballocationList::iterator lhs,
7508  VkDeviceSize rhsSize) const
7509  {
7510  return lhs->size < rhsSize;
7511  }
7512 };
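 /*
 This heterogeneous comparator lets the binary search in CreateAllocationRequest
 (VmaBinaryFindFirstNotLess, used below) compare list iterators either with each
 other or directly with a raw VkDeviceSize key, analogous to std::lower_bound
 with a custom comparator (sketch, assuming a size-sorted vector `freeBySize`):

     VmaSuballocationList::iterator* const it = std::lower_bound(
         freeBySize.data(), freeBySize.data() + freeBySize.size(),
         requiredSize, VmaSuballocationItemSizeLess());
 */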
7513 
7514 
7515 
7516 // class VmaBlockMetadata
7517 
7518 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7519  m_Size(0),
7520  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7521 {
7522 }
7523 
7524 #if VMA_STATS_STRING_ENABLED
7525 
7526 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7527  VkDeviceSize unusedBytes,
7528  size_t allocationCount,
7529  size_t unusedRangeCount) const
7530 {
7531  json.BeginObject();
7532 
7533  json.WriteString("TotalBytes");
7534  json.WriteNumber(GetSize());
7535 
7536  json.WriteString("UnusedBytes");
7537  json.WriteNumber(unusedBytes);
7538 
7539  json.WriteString("Allocations");
7540  json.WriteNumber((uint64_t)allocationCount);
7541 
7542  json.WriteString("UnusedRanges");
7543  json.WriteNumber((uint64_t)unusedRangeCount);
7544 
7545  json.WriteString("Suballocations");
7546  json.BeginArray();
7547 }
7548 
7549 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7550  VkDeviceSize offset,
7551  VmaAllocation hAllocation) const
7552 {
7553  json.BeginObject(true);
7554 
7555  json.WriteString("Offset");
7556  json.WriteNumber(offset);
7557 
7558  hAllocation->PrintParameters(json);
7559 
7560  json.EndObject();
7561 }
7562 
7563 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7564  VkDeviceSize offset,
7565  VkDeviceSize size) const
7566 {
7567  json.BeginObject(true);
7568 
7569  json.WriteString("Offset");
7570  json.WriteNumber(offset);
7571 
7572  json.WriteString("Type");
7573  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7574 
7575  json.WriteString("Size");
7576  json.WriteNumber(size);
7577 
7578  json.EndObject();
7579 }
7580 
7581 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7582 {
7583  json.EndArray();
7584  json.EndObject();
7585 }
7586 
7587 #endif // #if VMA_STATS_STRING_ENABLED
7588 
7589 
7590 // class VmaBlockMetadata_Generic
7591 
7592 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7593  VmaBlockMetadata(hAllocator),
7594  m_FreeCount(0),
7595  m_SumFreeSize(0),
7596  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7597  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7598 {
7599 }
7600 
7601 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7602 {
7603 }
7604 
7605 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7606 {
7607  VmaBlockMetadata::Init(size);
7608 
7609  m_FreeCount = 1;
7610  m_SumFreeSize = size;
7611 
7612  VmaSuballocation suballoc = {};
7613  suballoc.offset = 0;
7614  suballoc.size = size;
7615  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7616  suballoc.hAllocation = VK_NULL_HANDLE;
7617 
7618  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7619  m_Suballocations.push_back(suballoc);
7620  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7621  --suballocItem;
7622  m_FreeSuballocationsBySize.push_back(suballocItem);
7623 }
7624 
7625 bool VmaBlockMetadata_Generic::Validate() const
7626 {
7627  VMA_VALIDATE(!m_Suballocations.empty());
7628 
7629  // Expected offset of new suballocation as calculated from previous ones.
7630  VkDeviceSize calculatedOffset = 0;
7631  // Expected number of free suballocations as calculated from traversing their list.
7632  uint32_t calculatedFreeCount = 0;
7633  // Expected sum size of free suballocations as calculated from traversing their list.
7634  VkDeviceSize calculatedSumFreeSize = 0;
7635  // Expected number of free suballocations that should be registered in
7636  // m_FreeSuballocationsBySize calculated from traversing their list.
7637  size_t freeSuballocationsToRegister = 0;
7638  // True if previous visited suballocation was free.
7639  bool prevFree = false;
7640 
7641  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7642  suballocItem != m_Suballocations.cend();
7643  ++suballocItem)
7644  {
7645  const VmaSuballocation& subAlloc = *suballocItem;
7646 
7647  // Actual offset of this suballocation doesn't match expected one.
7648  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7649 
7650  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7651  // Two adjacent free suballocations are invalid. They should be merged.
7652  VMA_VALIDATE(!prevFree || !currFree);
7653 
7654  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7655 
7656  if(currFree)
7657  {
7658  calculatedSumFreeSize += subAlloc.size;
7659  ++calculatedFreeCount;
7660  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7661  {
7662  ++freeSuballocationsToRegister;
7663  }
7664 
7665  // Margin required between allocations - every free space must be at least that large.
7666  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7667  }
7668  else
7669  {
7670  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7671  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7672 
7673  // Margin required between allocations - previous allocation must be free.
7674  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7675  }
7676 
7677  calculatedOffset += subAlloc.size;
7678  prevFree = currFree;
7679  }
7680 
7681  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7682  // match expected one.
7683  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7684 
7685  VkDeviceSize lastSize = 0;
7686  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7687  {
7688  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7689 
7690  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7691  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7692  // They must be sorted by size ascending.
7693  VMA_VALIDATE(suballocItem->size >= lastSize);
7694 
7695  lastSize = suballocItem->size;
7696  }
7697 
7698  // Check if totals match calculated values.
7699  VMA_VALIDATE(ValidateFreeSuballocationList());
7700  VMA_VALIDATE(calculatedOffset == GetSize());
7701  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7702  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7703 
7704  return true;
7705 }
7706 
7707 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7708 {
7709  if(!m_FreeSuballocationsBySize.empty())
7710  {
7711  return m_FreeSuballocationsBySize.back()->size;
7712  }
7713  else
7714  {
7715  return 0;
7716  }
7717 }
7718 
7719 bool VmaBlockMetadata_Generic::IsEmpty() const
7720 {
7721  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7722 }
7723 
7724 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7725 {
7726  outInfo.blockCount = 1;
7727 
7728  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7729  outInfo.allocationCount = rangeCount - m_FreeCount;
7730  outInfo.unusedRangeCount = m_FreeCount;
7731 
7732  outInfo.unusedBytes = m_SumFreeSize;
7733  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7734 
7735  outInfo.allocationSizeMin = UINT64_MAX;
7736  outInfo.allocationSizeMax = 0;
7737  outInfo.unusedRangeSizeMin = UINT64_MAX;
7738  outInfo.unusedRangeSizeMax = 0;
7739 
7740  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7741  suballocItem != m_Suballocations.cend();
7742  ++suballocItem)
7743  {
7744  const VmaSuballocation& suballoc = *suballocItem;
7745  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7746  {
7747  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7748  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7749  }
7750  else
7751  {
7752  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7753  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7754  }
7755  }
7756 }
7757 
7758 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7759 {
7760  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7761 
7762  inoutStats.size += GetSize();
7763  inoutStats.unusedSize += m_SumFreeSize;
7764  inoutStats.allocationCount += rangeCount - m_FreeCount;
7765  inoutStats.unusedRangeCount += m_FreeCount;
7766  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7767 }
7768 
7769 #if VMA_STATS_STRING_ENABLED
7770 
7771 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7772 {
7773  PrintDetailedMap_Begin(json,
7774  m_SumFreeSize, // unusedBytes
7775  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7776  m_FreeCount); // unusedRangeCount
7777 
7778  size_t i = 0;
7779  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7780  suballocItem != m_Suballocations.cend();
7781  ++suballocItem, ++i)
7782  {
7783  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7784  {
7785  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7786  }
7787  else
7788  {
7789  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7790  }
7791  }
7792 
7793  PrintDetailedMap_End(json);
7794 }
7795 
7796 #endif // #if VMA_STATS_STRING_ENABLED
7797 
7798 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7799  uint32_t currentFrameIndex,
7800  uint32_t frameInUseCount,
7801  VkDeviceSize bufferImageGranularity,
7802  VkDeviceSize allocSize,
7803  VkDeviceSize allocAlignment,
7804  bool upperAddress,
7805  VmaSuballocationType allocType,
7806  bool canMakeOtherLost,
7807  uint32_t strategy,
7808  VmaAllocationRequest* pAllocationRequest)
7809 {
7810  VMA_ASSERT(allocSize > 0);
7811  VMA_ASSERT(!upperAddress);
7812  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7813  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7814  VMA_HEAVY_ASSERT(Validate());
7815 
7816  // There is not enough total free space in this block to fulfill the request: Early return.
7817  if(canMakeOtherLost == false &&
7818  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7819  {
7820  return false;
7821  }
7822 
7823  // New algorithm: efficiently search m_FreeSuballocationsBySize.
7824  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7825  if(freeSuballocCount > 0)
7826  {
7827  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7828  {
7829  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7830  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7831  m_FreeSuballocationsBySize.data(),
7832  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7833  allocSize + 2 * VMA_DEBUG_MARGIN,
7834  VmaSuballocationItemSizeLess());
7835  size_t index = it - m_FreeSuballocationsBySize.data();
7836  for(; index < freeSuballocCount; ++index)
7837  {
7838  if(CheckAllocation(
7839  currentFrameIndex,
7840  frameInUseCount,
7841  bufferImageGranularity,
7842  allocSize,
7843  allocAlignment,
7844  allocType,
7845  m_FreeSuballocationsBySize[index],
7846  false, // canMakeOtherLost
7847  &pAllocationRequest->offset,
7848  &pAllocationRequest->itemsToMakeLostCount,
7849  &pAllocationRequest->sumFreeSize,
7850  &pAllocationRequest->sumItemSize))
7851  {
7852  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7853  return true;
7854  }
7855  }
7856  }
7857  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7858  {
7859  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7860  it != m_Suballocations.end();
7861  ++it)
7862  {
7863  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7864  currentFrameIndex,
7865  frameInUseCount,
7866  bufferImageGranularity,
7867  allocSize,
7868  allocAlignment,
7869  allocType,
7870  it,
7871  false, // canMakeOtherLost
7872  &pAllocationRequest->offset,
7873  &pAllocationRequest->itemsToMakeLostCount,
7874  &pAllocationRequest->sumFreeSize,
7875  &pAllocationRequest->sumItemSize))
7876  {
7877  pAllocationRequest->item = it;
7878  return true;
7879  }
7880  }
7881  }
7882  else // WORST_FIT, FIRST_FIT
7883  {
7884  // Search starting from biggest suballocations.
7885  for(size_t index = freeSuballocCount; index--; )
7886  {
7887  if(CheckAllocation(
7888  currentFrameIndex,
7889  frameInUseCount,
7890  bufferImageGranularity,
7891  allocSize,
7892  allocAlignment,
7893  allocType,
7894  m_FreeSuballocationsBySize[index],
7895  false, // canMakeOtherLost
7896  &pAllocationRequest->offset,
7897  &pAllocationRequest->itemsToMakeLostCount,
7898  &pAllocationRequest->sumFreeSize,
7899  &pAllocationRequest->sumItemSize))
7900  {
7901  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7902  return true;
7903  }
7904  }
7905  }
7906  }
7907 
7908  if(canMakeOtherLost)
7909  {
7910  // Brute-force algorithm. TODO: Come up with something better.
7911 
7912  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7913  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7914 
7915  VmaAllocationRequest tmpAllocRequest = {};
7916  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7917  suballocIt != m_Suballocations.end();
7918  ++suballocIt)
7919  {
7920  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7921  suballocIt->hAllocation->CanBecomeLost())
7922  {
7923  if(CheckAllocation(
7924  currentFrameIndex,
7925  frameInUseCount,
7926  bufferImageGranularity,
7927  allocSize,
7928  allocAlignment,
7929  allocType,
7930  suballocIt,
7931  canMakeOtherLost,
7932  &tmpAllocRequest.offset,
7933  &tmpAllocRequest.itemsToMakeLostCount,
7934  &tmpAllocRequest.sumFreeSize,
7935  &tmpAllocRequest.sumItemSize))
7936  {
7937  tmpAllocRequest.item = suballocIt;
7938 
7939  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7940  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7941  {
7942  *pAllocationRequest = tmpAllocRequest;
7943  }
7944  }
7945  }
7946  }
7947 
7948  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7949  {
7950  return true;
7951  }
7952  }
7953 
7954  return false;
7955 }
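 /*
 The three search branches above map to strategies selectable via the public API
 (sketch; VMA_MEMORY_USAGE_GPU_ONLY is just an example usage value):

     VmaAllocationCreateInfo allocCreateInfo = {};
     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; // binary-search path
     // _WORST_FIT_BIT / _FIRST_FIT_BIT take the reverse scan above;
     // VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET is internal, used by defragmentation.
 */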
7956 
7957 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7958  uint32_t currentFrameIndex,
7959  uint32_t frameInUseCount,
7960  VmaAllocationRequest* pAllocationRequest)
7961 {
7962  while(pAllocationRequest->itemsToMakeLostCount > 0)
7963  {
7964  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7965  {
7966  ++pAllocationRequest->item;
7967  }
7968  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7969  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7970  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7971  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7972  {
7973  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7974  --pAllocationRequest->itemsToMakeLostCount;
7975  }
7976  else
7977  {
7978  return false;
7979  }
7980  }
7981 
7982  VMA_HEAVY_ASSERT(Validate());
7983  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7984  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7985 
7986  return true;
7987 }
7988 
7989 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7990 {
7991  uint32_t lostAllocationCount = 0;
7992  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7993  it != m_Suballocations.end();
7994  ++it)
7995  {
7996  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7997  it->hAllocation->CanBecomeLost() &&
7998  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7999  {
8000  it = FreeSuballocation(it);
8001  ++lostAllocationCount;
8002  }
8003  }
8004  return lostAllocationCount;
8005 }
8006 
8007 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8008 {
8009  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8010  it != m_Suballocations.end();
8011  ++it)
8012  {
8013  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8014  {
8015  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8016  {
8017  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8018  return VK_ERROR_VALIDATION_FAILED_EXT;
8019  }
8020  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8021  {
8022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8023  return VK_ERROR_VALIDATION_FAILED_EXT;
8024  }
8025  }
8026  }
8027 
8028  return VK_SUCCESS;
8029 }
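 /*
 The magic values validated above are written into the VMA_DEBUG_MARGIN bytes
 around each allocation when corruption detection is enabled. Typical setup
 (sketch): define the macros before including this header, then run the public
 check periodically from application code:

     #define VMA_DEBUG_MARGIN 16
     #define VMA_DEBUG_DETECT_CORRUPTION 1
     #include "vk_mem_alloc.h"

     // UINT32_MAX = check every memory type that supports the feature.
     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
 */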
8030 
8031 void VmaBlockMetadata_Generic::Alloc(
8032  const VmaAllocationRequest& request,
8033  VmaSuballocationType type,
8034  VkDeviceSize allocSize,
8035  bool upperAddress,
8036  VmaAllocation hAllocation)
8037 {
8038  VMA_ASSERT(!upperAddress);
8039  VMA_ASSERT(request.item != m_Suballocations.end());
8040  VmaSuballocation& suballoc = *request.item;
8041  // Given suballocation is a free block.
8042  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8043  // Given offset is inside this suballocation.
8044  VMA_ASSERT(request.offset >= suballoc.offset);
8045  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8046  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8047  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8048 
8049  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8050  // it to become used.
8051  UnregisterFreeSuballocation(request.item);
8052 
8053  suballoc.offset = request.offset;
8054  suballoc.size = allocSize;
8055  suballoc.type = type;
8056  suballoc.hAllocation = hAllocation;
8057 
8058  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8059  if(paddingEnd)
8060  {
8061  VmaSuballocation paddingSuballoc = {};
8062  paddingSuballoc.offset = request.offset + allocSize;
8063  paddingSuballoc.size = paddingEnd;
8064  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8065  VmaSuballocationList::iterator next = request.item;
8066  ++next;
8067  const VmaSuballocationList::iterator paddingEndItem =
8068  m_Suballocations.insert(next, paddingSuballoc);
8069  RegisterFreeSuballocation(paddingEndItem);
8070  }
8071 
8072  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8073  if(paddingBegin)
8074  {
8075  VmaSuballocation paddingSuballoc = {};
8076  paddingSuballoc.offset = request.offset - paddingBegin;
8077  paddingSuballoc.size = paddingBegin;
8078  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8079  const VmaSuballocationList::iterator paddingBeginItem =
8080  m_Suballocations.insert(request.item, paddingSuballoc);
8081  RegisterFreeSuballocation(paddingBeginItem);
8082  }
8083 
8084  // Update totals.
8085  m_FreeCount = m_FreeCount - 1;
8086  if(paddingBegin > 0)
8087  {
8088  ++m_FreeCount;
8089  }
8090  if(paddingEnd > 0)
8091  {
8092  ++m_FreeCount;
8093  }
8094  m_SumFreeSize -= allocSize;
8095 }
8096 
8097 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8098 {
8099  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8100  suballocItem != m_Suballocations.end();
8101  ++suballocItem)
8102  {
8103  VmaSuballocation& suballoc = *suballocItem;
8104  if(suballoc.hAllocation == allocation)
8105  {
8106  FreeSuballocation(suballocItem);
8107  VMA_HEAVY_ASSERT(Validate());
8108  return;
8109  }
8110  }
8111  VMA_ASSERT(0 && "Not found!");
8112 }
8113 
8114 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8115 {
8116  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8117  suballocItem != m_Suballocations.end();
8118  ++suballocItem)
8119  {
8120  VmaSuballocation& suballoc = *suballocItem;
8121  if(suballoc.offset == offset)
8122  {
8123  FreeSuballocation(suballocItem);
8124  return;
8125  }
8126  }
8127  VMA_ASSERT(0 && "Not found!");
8128 }
8129 
8130 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8131 {
8132  typedef VmaSuballocationList::iterator iter_type;
8133  for(iter_type suballocItem = m_Suballocations.begin();
8134  suballocItem != m_Suballocations.end();
8135  ++suballocItem)
8136  {
8137  VmaSuballocation& suballoc = *suballocItem;
8138  if(suballoc.hAllocation == alloc)
8139  {
8140  iter_type nextItem = suballocItem;
8141  ++nextItem;
8142 
8143  // Should have been ensured on higher level.
8144  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8145 
8146  // Shrinking.
8147  if(newSize < alloc->GetSize())
8148  {
8149  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8150 
8151  // There is next item.
8152  if(nextItem != m_Suballocations.end())
8153  {
8154  // Next item is free.
8155  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8156  {
8157  // Grow this next item backward.
8158  UnregisterFreeSuballocation(nextItem);
8159  nextItem->offset -= sizeDiff;
8160  nextItem->size += sizeDiff;
8161  RegisterFreeSuballocation(nextItem);
8162  }
8163  // Next item is not free.
8164  else
8165  {
8166  // Create free item after current one.
8167  VmaSuballocation newFreeSuballoc;
8168  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8169  newFreeSuballoc.offset = suballoc.offset + newSize;
8170  newFreeSuballoc.size = sizeDiff;
8171  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8172  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8173  RegisterFreeSuballocation(newFreeSuballocIt);
8174 
8175  ++m_FreeCount;
8176  }
8177  }
8178  // This is the last item.
8179  else
8180  {
8181  // Create free item at the end.
8182  VmaSuballocation newFreeSuballoc;
8183  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8184  newFreeSuballoc.offset = suballoc.offset + newSize;
8185  newFreeSuballoc.size = sizeDiff;
8186  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8187  m_Suballocations.push_back(newFreeSuballoc);
8188 
8189  iter_type newFreeSuballocIt = m_Suballocations.end();
8190  RegisterFreeSuballocation(--newFreeSuballocIt);
8191 
8192  ++m_FreeCount;
8193  }
8194 
8195  suballoc.size = newSize;
8196  m_SumFreeSize += sizeDiff;
8197  }
8198  // Growing.
8199  else
8200  {
8201  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8202 
8203  // There is next item.
8204  if(nextItem != m_Suballocations.end())
8205  {
8206  // Next item is free.
8207  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8208  {
8209  // There is not enough free space, including margin.
8210  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8211  {
8212  return false;
8213  }
8214 
8215  // There is more free space than required.
8216  if(nextItem->size > sizeDiff)
8217  {
8218  // Move and shrink this next item.
8219  UnregisterFreeSuballocation(nextItem);
8220  nextItem->offset += sizeDiff;
8221  nextItem->size -= sizeDiff;
8222  RegisterFreeSuballocation(nextItem);
8223  }
8224  // There is exactly the amount of free space required.
8225  else
8226  {
8227  // Remove this next free item.
8228  UnregisterFreeSuballocation(nextItem);
8229  m_Suballocations.erase(nextItem);
8230  --m_FreeCount;
8231  }
8232  }
8233  // Next item is not free - there is no space to grow.
8234  else
8235  {
8236  return false;
8237  }
8238  }
8239  // This is the last item - there is no space to grow.
8240  else
8241  {
8242  return false;
8243  }
8244 
8245  suballoc.size = newSize;
8246  m_SumFreeSize -= sizeDiff;
8247  }
8248 
8249  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8250  return true;
8251  }
8252  }
8253  VMA_ASSERT(0 && "Not found!");
8254  return false;
8255 }
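 /*
 Reached through the public vmaResizeAllocation(). In this metadata, shrinking
 always succeeds in place, while growing succeeds only when the suballocation is
 immediately followed by a large-enough free range; otherwise this function
 returns false and the allocation is left unchanged. Sketch:

     VkResult res = vmaResizeAllocation(allocator, alloc, newSize);
 */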
8256 
8257 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8258 {
8259  VkDeviceSize lastSize = 0;
8260  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8261  {
8262  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8263 
8264  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8265  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8266  VMA_VALIDATE(it->size >= lastSize);
8267  lastSize = it->size;
8268  }
8269  return true;
8270 }
8271 
8272 bool VmaBlockMetadata_Generic::CheckAllocation(
8273  uint32_t currentFrameIndex,
8274  uint32_t frameInUseCount,
8275  VkDeviceSize bufferImageGranularity,
8276  VkDeviceSize allocSize,
8277  VkDeviceSize allocAlignment,
8278  VmaSuballocationType allocType,
8279  VmaSuballocationList::const_iterator suballocItem,
8280  bool canMakeOtherLost,
8281  VkDeviceSize* pOffset,
8282  size_t* itemsToMakeLostCount,
8283  VkDeviceSize* pSumFreeSize,
8284  VkDeviceSize* pSumItemSize) const
8285 {
8286  VMA_ASSERT(allocSize > 0);
8287  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8288  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8289  VMA_ASSERT(pOffset != VMA_NULL);
8290 
8291  *itemsToMakeLostCount = 0;
8292  *pSumFreeSize = 0;
8293  *pSumItemSize = 0;
8294 
8295  if(canMakeOtherLost)
8296  {
8297  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8298  {
8299  *pSumFreeSize = suballocItem->size;
8300  }
8301  else
8302  {
8303  if(suballocItem->hAllocation->CanBecomeLost() &&
8304  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8305  {
8306  ++*itemsToMakeLostCount;
8307  *pSumItemSize = suballocItem->size;
8308  }
8309  else
8310  {
8311  return false;
8312  }
8313  }
8314 
8315  // Remaining size is too small for this request: Early return.
8316  if(GetSize() - suballocItem->offset < allocSize)
8317  {
8318  return false;
8319  }
8320 
8321  // Start from offset equal to beginning of this suballocation.
8322  *pOffset = suballocItem->offset;
8323 
8324  // Apply VMA_DEBUG_MARGIN at the beginning.
8325  if(VMA_DEBUG_MARGIN > 0)
8326  {
8327  *pOffset += VMA_DEBUG_MARGIN;
8328  }
8329 
8330  // Apply alignment.
8331  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8332 
8333  // Check previous suballocations for BufferImageGranularity conflicts.
8334  // Make bigger alignment if necessary.
8335  if(bufferImageGranularity > 1)
8336  {
8337  bool bufferImageGranularityConflict = false;
8338  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8339  while(prevSuballocItem != m_Suballocations.cbegin())
8340  {
8341  --prevSuballocItem;
8342  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8343  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8344  {
8345  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8346  {
8347  bufferImageGranularityConflict = true;
8348  break;
8349  }
8350  }
8351  else
8352  // Already on previous page.
8353  break;
8354  }
8355  if(bufferImageGranularityConflict)
8356  {
8357  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8358  }
8359  }
8360 
8361  // Now that we have final *pOffset, check if we are past suballocItem.
8362  // If yes, return false - this function should be called for another suballocItem as starting point.
8363  if(*pOffset >= suballocItem->offset + suballocItem->size)
8364  {
8365  return false;
8366  }
8367 
8368  // Calculate padding at the beginning based on current offset.
8369  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8370 
8371  // Calculate required margin at the end.
8372  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8373 
8374  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8375  // Another early return check.
8376  if(suballocItem->offset + totalSize > GetSize())
8377  {
8378  return false;
8379  }
8380 
8381  // Advance lastSuballocItem until desired size is reached.
8382  // Update itemsToMakeLostCount.
8383  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8384  if(totalSize > suballocItem->size)
8385  {
8386  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8387  while(remainingSize > 0)
8388  {
8389  ++lastSuballocItem;
8390  if(lastSuballocItem == m_Suballocations.cend())
8391  {
8392  return false;
8393  }
8394  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8395  {
8396  *pSumFreeSize += lastSuballocItem->size;
8397  }
8398  else
8399  {
8400  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8401  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8402  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8403  {
8404  ++*itemsToMakeLostCount;
8405  *pSumItemSize += lastSuballocItem->size;
8406  }
8407  else
8408  {
8409  return false;
8410  }
8411  }
8412  remainingSize = (lastSuballocItem->size < remainingSize) ?
8413  remainingSize - lastSuballocItem->size : 0;
8414  }
8415  }
8416 
8417  // Check next suballocations for BufferImageGranularity conflicts.
8418  // If conflict exists, we must mark more allocations lost or fail.
8419  if(bufferImageGranularity > 1)
8420  {
8421  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8422  ++nextSuballocItem;
8423  while(nextSuballocItem != m_Suballocations.cend())
8424  {
8425  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8426  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8427  {
8428  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8429  {
8430  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8431  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8432  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8433  {
8434  ++*itemsToMakeLostCount;
8435  }
8436  else
8437  {
8438  return false;
8439  }
8440  }
8441  }
8442  else
8443  {
8444  // Already on next page.
8445  break;
8446  }
8447  ++nextSuballocItem;
8448  }
8449  }
8450  }
8451  else
8452  {
8453  const VmaSuballocation& suballoc = *suballocItem;
8454  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8455 
8456  *pSumFreeSize = suballoc.size;
8457 
8458  // Size of this suballocation is too small for this request: Early return.
8459  if(suballoc.size < allocSize)
8460  {
8461  return false;
8462  }
8463 
8464  // Start from offset equal to beginning of this suballocation.
8465  *pOffset = suballoc.offset;
8466 
8467  // Apply VMA_DEBUG_MARGIN at the beginning.
8468  if(VMA_DEBUG_MARGIN > 0)
8469  {
8470  *pOffset += VMA_DEBUG_MARGIN;
8471  }
8472 
8473  // Apply alignment.
8474  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8475 
8476  // Check previous suballocations for BufferImageGranularity conflicts.
8477  // Make bigger alignment if necessary.
8478  if(bufferImageGranularity > 1)
8479  {
8480  bool bufferImageGranularityConflict = false;
8481  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8482  while(prevSuballocItem != m_Suballocations.cbegin())
8483  {
8484  --prevSuballocItem;
8485  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8486  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8487  {
8488  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8489  {
8490  bufferImageGranularityConflict = true;
8491  break;
8492  }
8493  }
8494  else
8495  // Already on previous page.
8496  break;
8497  }
8498  if(bufferImageGranularityConflict)
8499  {
8500  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8501  }
8502  }
8503 
8504  // Calculate padding at the beginning based on current offset.
8505  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8506 
8507  // Calculate required margin at the end.
8508  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8509 
8510  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8511  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8512  {
8513  return false;
8514  }
8515 
8516  // Check next suballocations for BufferImageGranularity conflicts.
8517  // If conflict exists, allocation cannot be made here.
8518  if(bufferImageGranularity > 1)
8519  {
8520  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8521  ++nextSuballocItem;
8522  while(nextSuballocItem != m_Suballocations.cend())
8523  {
8524  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8525  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8526  {
8527  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8528  {
8529  return false;
8530  }
8531  }
8532  else
8533  {
8534  // Already on next page.
8535  break;
8536  }
8537  ++nextSuballocItem;
8538  }
8539  }
8540  }
8541 
8542  // All tests passed: Success. pOffset is already filled.
8543  return true;
8544 }
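// Worked example (editorial, assuming VMA_DEBUG_MARGIN == 16): for a free
// suballocation at offset 1000 and a request with allocAlignment == 256, the
// code above computes *pOffset = VmaAlignUp(1000 + 16, 256) == 1024, hence
// paddingBegin == 24, and the request fits only if
// 24 + allocSize + 16 <= suballoc.size — with *pOffset possibly pushed up to
// a bufferImageGranularity boundary first if a neighboring suballocation of a
// conflicting type shares the same page.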
8545 
8546 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8547 {
8548  VMA_ASSERT(item != m_Suballocations.end());
8549  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8550 
8551  VmaSuballocationList::iterator nextItem = item;
8552  ++nextItem;
8553  VMA_ASSERT(nextItem != m_Suballocations.end());
8554  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8555 
8556  item->size += nextItem->size;
8557  --m_FreeCount;
8558  m_Suballocations.erase(nextItem);
8559 }
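// Editorial note: given adjacent free items [offset 0, size 32] and
// [offset 32, size 64], the function above folds them into one
// [offset 0, size 96] item. It touches only m_Suballocations, so the caller
// must first remove nextItem from m_FreeSuballocationsBySize via
// UnregisterFreeSuballocation(), as FreeSuballocation() below does.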
8560 
8561 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8562 {
8563  // Change this suballocation to be marked as free.
8564  VmaSuballocation& suballoc = *suballocItem;
8565  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8566  suballoc.hAllocation = VK_NULL_HANDLE;
8567 
8568  // Update totals.
8569  ++m_FreeCount;
8570  m_SumFreeSize += suballoc.size;
8571 
8572  // Merge with previous and/or next suballocation if it's also free.
8573  bool mergeWithNext = false;
8574  bool mergeWithPrev = false;
8575 
8576  VmaSuballocationList::iterator nextItem = suballocItem;
8577  ++nextItem;
8578  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8579  {
8580  mergeWithNext = true;
8581  }
8582 
8583  VmaSuballocationList::iterator prevItem = suballocItem;
8584  if(suballocItem != m_Suballocations.begin())
8585  {
8586  --prevItem;
8587  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8588  {
8589  mergeWithPrev = true;
8590  }
8591  }
8592 
8593  if(mergeWithNext)
8594  {
8595  UnregisterFreeSuballocation(nextItem);
8596  MergeFreeWithNext(suballocItem);
8597  }
8598 
8599  if(mergeWithPrev)
8600  {
8601  UnregisterFreeSuballocation(prevItem);
8602  MergeFreeWithNext(prevItem);
8603  RegisterFreeSuballocation(prevItem);
8604  return prevItem;
8605  }
8606  else
8607  {
8608  RegisterFreeSuballocation(suballocItem);
8609  return suballocItem;
8610  }
8611 }
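// Editorial note: for a layout [A free][B used][C free], freeing B first
// raises m_FreeCount to 3, then the two merges above bring it back down to 1,
// leaving a single free suballocation spanning A+B+C that is re-registered in
// m_FreeSuballocationsBySize under its combined size.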
8612 
8613 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8614 {
8615  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8616  VMA_ASSERT(item->size > 0);
8617 
8618  // You may want to enable this validation at the beginning or at the end of
8619  // this function, depending on what you want to check.
8620  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8621 
8622  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8623  {
8624  if(m_FreeSuballocationsBySize.empty())
8625  {
8626  m_FreeSuballocationsBySize.push_back(item);
8627  }
8628  else
8629  {
8630  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8631  }
8632  }
8633 
8634  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8635 }
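// Editorial note: free ranges smaller than
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER remain in m_Suballocations but
// are deliberately left out of the by-size vector — ranges that small are not
// worth offering to the best-fit search and would only bloat the vector.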
8636 
8637 
8638 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8639 {
8640  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8641  VMA_ASSERT(item->size > 0);
8642 
8643  // You may want to enable this validation at the beginning or at the end of
8644  // this function, depending on what you want to check.
8645  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8646 
8647  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8648  {
8649  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8650  m_FreeSuballocationsBySize.data(),
8651  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8652  item,
8653  VmaSuballocationItemSizeLess());
8654  for(size_t index = it - m_FreeSuballocationsBySize.data();
8655  index < m_FreeSuballocationsBySize.size();
8656  ++index)
8657  {
8658  if(m_FreeSuballocationsBySize[index] == item)
8659  {
8660  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8661  return;
8662  }
8663  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8664  }
8665  VMA_ASSERT(0 && "Not found.");
8666  }
8667 
8668  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8669 }
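// Editorial note: VmaBinaryFindFirstNotLess() positions the search at the
// first entry whose size is not less than item->size; since several free
// ranges can share one size, the loop above then walks the equal-size run
// until it hits the exact iterator — O(log n) to locate the run plus O(k)
// over the k same-size entries.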
8670 
8671 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8672  VkDeviceSize bufferImageGranularity,
8673  VmaSuballocationType& inOutPrevSuballocType) const
8674 {
8675  if(bufferImageGranularity == 1 || IsEmpty())
8676  {
8677  return false;
8678  }
8679 
8680  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8681  bool typeConflictFound = false;
8682  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8683  it != m_Suballocations.cend();
8684  ++it)
8685  {
8686  const VmaSuballocationType suballocType = it->type;
8687  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8688  {
8689  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8690  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8691  {
8692  typeConflictFound = true;
8693  }
8694  inOutPrevSuballocType = suballocType;
8695  }
8696  }
8697 
8698  return typeConflictFound || minAlignment >= bufferImageGranularity;
8699 }
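// Editorial note: this is a conservative whole-block scan — it reports
// whether any two consecutive non-free suballocations have conflicting types,
// or whether every allocation's alignment already reaches
// bufferImageGranularity, so the caller (the defragmentation code) knows when
// moving allocations inside this block needs granularity-aware placement.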
8700 
8701 ////////////////////////////////////////////////////////////////////////////////
8702 // class VmaBlockMetadata_Linear
8703 
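// Usage sketch (editorial; the calls below are this library's public API,
// while allocator and memTypeIndex are assumed to exist already): this
// metadata class backs custom pools created with the linear algorithm:
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//   poolCreateInfo.blockSize = 64ull * 1024 * 1024;
//   poolCreateInfo.maxBlockCount = 1; // the linear algorithm uses one block
//   VmaPool pool = VK_NULL_HANDLE;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);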
8704 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8705  VmaBlockMetadata(hAllocator),
8706  m_SumFreeSize(0),
8707  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8708  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8709  m_1stVectorIndex(0),
8710  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8711  m_1stNullItemsBeginCount(0),
8712  m_1stNullItemsMiddleCount(0),
8713  m_2ndNullItemsCount(0)
8714 {
8715 }
8716 
8717 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8718 {
8719 }
8720 
8721 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8722 {
8723  VmaBlockMetadata::Init(size);
8724  m_SumFreeSize = size;
8725 }
8726 
8727 bool VmaBlockMetadata_Linear::Validate() const
8728 {
8729  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8730  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8731 
8732  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8733  VMA_VALIDATE(!suballocations1st.empty() ||
8734  suballocations2nd.empty() ||
8735  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8736 
8737  if(!suballocations1st.empty())
8738  {
8739  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8740  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8741  // A null item at the end should have been removed by pop_back().
8742  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8743  }
8744  if(!suballocations2nd.empty())
8745  {
8746  // A null item at the end should have been removed by pop_back().
8747  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8748  }
8749 
8750  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8751  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8752 
8753  VkDeviceSize sumUsedSize = 0;
8754  const size_t suballoc1stCount = suballocations1st.size();
8755  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8756 
8757  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8758  {
8759  const size_t suballoc2ndCount = suballocations2nd.size();
8760  size_t nullItem2ndCount = 0;
8761  for(size_t i = 0; i < suballoc2ndCount; ++i)
8762  {
8763  const VmaSuballocation& suballoc = suballocations2nd[i];
8764  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8765 
8766  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8767  VMA_VALIDATE(suballoc.offset >= offset);
8768 
8769  if(!currFree)
8770  {
8771  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8772  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8773  sumUsedSize += suballoc.size;
8774  }
8775  else
8776  {
8777  ++nullItem2ndCount;
8778  }
8779 
8780  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8781  }
8782 
8783  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8784  }
8785 
8786  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8787  {
8788  const VmaSuballocation& suballoc = suballocations1st[i];
8789  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8790  suballoc.hAllocation == VK_NULL_HANDLE);
8791  }
8792 
8793  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8794 
8795  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8796  {
8797  const VmaSuballocation& suballoc = suballocations1st[i];
8798  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8799 
8800  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8801  VMA_VALIDATE(suballoc.offset >= offset);
8802  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8803 
8804  if(!currFree)
8805  {
8806  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8807  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8808  sumUsedSize += suballoc.size;
8809  }
8810  else
8811  {
8812  ++nullItem1stCount;
8813  }
8814 
8815  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8816  }
8817  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8818 
8819  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8820  {
8821  const size_t suballoc2ndCount = suballocations2nd.size();
8822  size_t nullItem2ndCount = 0;
8823  for(size_t i = suballoc2ndCount; i--; )
8824  {
8825  const VmaSuballocation& suballoc = suballocations2nd[i];
8826  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8827 
8828  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8829  VMA_VALIDATE(suballoc.offset >= offset);
8830 
8831  if(!currFree)
8832  {
8833  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8834  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8835  sumUsedSize += suballoc.size;
8836  }
8837  else
8838  {
8839  ++nullItem2ndCount;
8840  }
8841 
8842  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8843  }
8844 
8845  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8846  }
8847 
8848  VMA_VALIDATE(offset <= GetSize());
8849  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8850 
8851  return true;
8852 }
8853 
8854 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8855 {
8856  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8857  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8858 }
8859 
8860 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8861 {
8862  const VkDeviceSize size = GetSize();
8863 
8864  /*
8865  We don't consider gaps inside allocation vectors with freed allocations because
8866  they are not suitable for reuse in a linear allocator. We consider only space that
8867  is available for new allocations.
8868  */
8869  if(IsEmpty())
8870  {
8871  return size;
8872  }
8873 
8874  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8875 
8876  switch(m_2ndVectorMode)
8877  {
8878  case SECOND_VECTOR_EMPTY:
8879  /*
8880  Available space is after end of 1st, as well as before beginning of 1st (which
8881  would make it a ring buffer).
8882  */
8883  {
8884  const size_t suballocations1stCount = suballocations1st.size();
8885  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8886  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8887  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8888  return VMA_MAX(
8889  firstSuballoc.offset,
8890  size - (lastSuballoc.offset + lastSuballoc.size));
8891  }
8892  break;
8893 
8894  case SECOND_VECTOR_RING_BUFFER:
8895  /*
8896  Available space is only between end of 2nd and beginning of 1st.
8897  */
8898  {
8899  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8900  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8901  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8902  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8903  }
8904  break;
8905 
8906  case SECOND_VECTOR_DOUBLE_STACK:
8907  /*
8908  Available space is only between end of 1st and top of 2nd.
8909  */
8910  {
8911  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8912  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8913  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8914  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8915  }
8916  break;
8917 
8918  default:
8919  VMA_ASSERT(0);
8920  return 0;
8921  }
8922 }
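// Worked example (editorial): for a block of size 1000 in SECOND_VECTOR_EMPTY
// mode whose live suballocations span [100, 400), the code above returns
// VMA_MAX(100, 1000 - 400) == 600 — only the contiguous space before the
// first and after the last suballocation counts; holes left by freed items in
// the middle are ignored, as the comment at the top explains.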
8923 
8924 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8925 {
8926  const VkDeviceSize size = GetSize();
8927  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8928  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8929  const size_t suballoc1stCount = suballocations1st.size();
8930  const size_t suballoc2ndCount = suballocations2nd.size();
8931 
8932  outInfo.blockCount = 1;
8933  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8934  outInfo.unusedRangeCount = 0;
8935  outInfo.usedBytes = outInfo.unusedBytes = 0; // unusedBytes is accumulated with += below, so it must start at 0
8936  outInfo.allocationSizeMin = UINT64_MAX;
8937  outInfo.allocationSizeMax = 0;
8938  outInfo.unusedRangeSizeMin = UINT64_MAX;
8939  outInfo.unusedRangeSizeMax = 0;
8940 
8941  VkDeviceSize lastOffset = 0;
8942 
8943  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8944  {
8945  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8946  size_t nextAlloc2ndIndex = 0;
8947  while(lastOffset < freeSpace2ndTo1stEnd)
8948  {
8949  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8950  while(nextAlloc2ndIndex < suballoc2ndCount &&
8951  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8952  {
8953  ++nextAlloc2ndIndex;
8954  }
8955 
8956  // Found non-null allocation.
8957  if(nextAlloc2ndIndex < suballoc2ndCount)
8958  {
8959  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8960 
8961  // 1. Process free space before this allocation.
8962  if(lastOffset < suballoc.offset)
8963  {
8964  // There is free space from lastOffset to suballoc.offset.
8965  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8966  ++outInfo.unusedRangeCount;
8967  outInfo.unusedBytes += unusedRangeSize;
8968  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8969  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8970  }
8971 
8972  // 2. Process this allocation.
8973  // There is allocation with suballoc.offset, suballoc.size.
8974  outInfo.usedBytes += suballoc.size;
8975  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8976  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8977 
8978  // 3. Prepare for next iteration.
8979  lastOffset = suballoc.offset + suballoc.size;
8980  ++nextAlloc2ndIndex;
8981  }
8982  // We are at the end.
8983  else
8984  {
8985  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8986  if(lastOffset < freeSpace2ndTo1stEnd)
8987  {
8988  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8989  ++outInfo.unusedRangeCount;
8990  outInfo.unusedBytes += unusedRangeSize;
8991  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8992  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8993  }
8994 
8995  // End of loop.
8996  lastOffset = freeSpace2ndTo1stEnd;
8997  }
8998  }
8999  }
9000 
9001  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9002  const VkDeviceSize freeSpace1stTo2ndEnd =
9003  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9004  while(lastOffset < freeSpace1stTo2ndEnd)
9005  {
9006  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9007  while(nextAlloc1stIndex < suballoc1stCount &&
9008  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9009  {
9010  ++nextAlloc1stIndex;
9011  }
9012 
9013  // Found non-null allocation.
9014  if(nextAlloc1stIndex < suballoc1stCount)
9015  {
9016  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9017 
9018  // 1. Process free space before this allocation.
9019  if(lastOffset < suballoc.offset)
9020  {
9021  // There is free space from lastOffset to suballoc.offset.
9022  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9023  ++outInfo.unusedRangeCount;
9024  outInfo.unusedBytes += unusedRangeSize;
9025  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9026  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9027  }
9028 
9029  // 2. Process this allocation.
9030  // There is allocation with suballoc.offset, suballoc.size.
9031  outInfo.usedBytes += suballoc.size;
9032  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9033  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9034 
9035  // 3. Prepare for next iteration.
9036  lastOffset = suballoc.offset + suballoc.size;
9037  ++nextAlloc1stIndex;
9038  }
9039  // We are at the end.
9040  else
9041  {
9042  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9043  if(lastOffset < freeSpace1stTo2ndEnd)
9044  {
9045  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9046  ++outInfo.unusedRangeCount;
9047  outInfo.unusedBytes += unusedRangeSize;
9048  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9049  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9050  }
9051 
9052  // End of loop.
9053  lastOffset = freeSpace1stTo2ndEnd;
9054  }
9055  }
9056 
9057  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9058  {
9059  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9060  while(lastOffset < size)
9061  {
9062  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9063  while(nextAlloc2ndIndex != SIZE_MAX &&
9064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9065  {
9066  --nextAlloc2ndIndex;
9067  }
9068 
9069  // Found non-null allocation.
9070  if(nextAlloc2ndIndex != SIZE_MAX)
9071  {
9072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9073 
9074  // 1. Process free space before this allocation.
9075  if(lastOffset < suballoc.offset)
9076  {
9077  // There is free space from lastOffset to suballoc.offset.
9078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9079  ++outInfo.unusedRangeCount;
9080  outInfo.unusedBytes += unusedRangeSize;
9081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9082  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9083  }
9084 
9085  // 2. Process this allocation.
9086  // There is allocation with suballoc.offset, suballoc.size.
9087  outInfo.usedBytes += suballoc.size;
9088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9089  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9090 
9091  // 3. Prepare for next iteration.
9092  lastOffset = suballoc.offset + suballoc.size;
9093  --nextAlloc2ndIndex;
9094  }
9095  // We are at the end.
9096  else
9097  {
9098  // There is free space from lastOffset to size.
9099  if(lastOffset < size)
9100  {
9101  const VkDeviceSize unusedRangeSize = size - lastOffset;
9102  ++outInfo.unusedRangeCount;
9103  outInfo.unusedBytes += unusedRangeSize;
9104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9105  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9106  }
9107 
9108  // End of loop.
9109  lastOffset = size;
9110  }
9111  }
9112  }
9113 
9114  outInfo.unusedBytes = size - outInfo.usedBytes;
9115 }
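// Editorial note: the three loops above mirror the linear layout — in
// ring-buffer mode the 2nd vector occupies [0, start of 1st), the 1st vector
// follows it, and in double-stack mode the 2nd vector grows downward from the
// end of the block, which is why it is walked back to front.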
9116 
9117 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9118 {
9119  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9120  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9121  const VkDeviceSize size = GetSize();
9122  const size_t suballoc1stCount = suballocations1st.size();
9123  const size_t suballoc2ndCount = suballocations2nd.size();
9124 
9125  inoutStats.size += size;
9126 
9127  VkDeviceSize lastOffset = 0;
9128 
9129  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9130  {
9131  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9132  size_t nextAlloc2ndIndex = 0; // indexes suballocations2nd, so it starts at 0 as in CalcAllocationStatInfo()
9133  while(lastOffset < freeSpace2ndTo1stEnd)
9134  {
9135  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9136  while(nextAlloc2ndIndex < suballoc2ndCount &&
9137  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9138  {
9139  ++nextAlloc2ndIndex;
9140  }
9141 
9142  // Found non-null allocation.
9143  if(nextAlloc2ndIndex < suballoc2ndCount)
9144  {
9145  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9146 
9147  // 1. Process free space before this allocation.
9148  if(lastOffset < suballoc.offset)
9149  {
9150  // There is free space from lastOffset to suballoc.offset.
9151  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9152  inoutStats.unusedSize += unusedRangeSize;
9153  ++inoutStats.unusedRangeCount;
9154  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9155  }
9156 
9157  // 2. Process this allocation.
9158  // There is allocation with suballoc.offset, suballoc.size.
9159  ++inoutStats.allocationCount;
9160 
9161  // 3. Prepare for next iteration.
9162  lastOffset = suballoc.offset + suballoc.size;
9163  ++nextAlloc2ndIndex;
9164  }
9165  // We are at the end.
9166  else
9167  {
9168  if(lastOffset < freeSpace2ndTo1stEnd)
9169  {
9170  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9171  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9172  inoutStats.unusedSize += unusedRangeSize;
9173  ++inoutStats.unusedRangeCount;
9174  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9175  }
9176 
9177  // End of loop.
9178  lastOffset = freeSpace2ndTo1stEnd;
9179  }
9180  }
9181  }
9182 
9183  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9184  const VkDeviceSize freeSpace1stTo2ndEnd =
9185  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9186  while(lastOffset < freeSpace1stTo2ndEnd)
9187  {
9188  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9189  while(nextAlloc1stIndex < suballoc1stCount &&
9190  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9191  {
9192  ++nextAlloc1stIndex;
9193  }
9194 
9195  // Found non-null allocation.
9196  if(nextAlloc1stIndex < suballoc1stCount)
9197  {
9198  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9199 
9200  // 1. Process free space before this allocation.
9201  if(lastOffset < suballoc.offset)
9202  {
9203  // There is free space from lastOffset to suballoc.offset.
9204  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9205  inoutStats.unusedSize += unusedRangeSize;
9206  ++inoutStats.unusedRangeCount;
9207  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9208  }
9209 
9210  // 2. Process this allocation.
9211  // There is allocation with suballoc.offset, suballoc.size.
9212  ++inoutStats.allocationCount;
9213 
9214  // 3. Prepare for next iteration.
9215  lastOffset = suballoc.offset + suballoc.size;
9216  ++nextAlloc1stIndex;
9217  }
9218  // We are at the end.
9219  else
9220  {
9221  if(lastOffset < freeSpace1stTo2ndEnd)
9222  {
9223  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9224  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9225  inoutStats.unusedSize += unusedRangeSize;
9226  ++inoutStats.unusedRangeCount;
9227  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9228  }
9229 
9230  // End of loop.
9231  lastOffset = freeSpace1stTo2ndEnd;
9232  }
9233  }
9234 
9235  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9236  {
9237  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9238  while(lastOffset < size)
9239  {
9240  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9241  while(nextAlloc2ndIndex != SIZE_MAX &&
9242  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9243  {
9244  --nextAlloc2ndIndex;
9245  }
9246 
9247  // Found non-null allocation.
9248  if(nextAlloc2ndIndex != SIZE_MAX)
9249  {
9250  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9251 
9252  // 1. Process free space before this allocation.
9253  if(lastOffset < suballoc.offset)
9254  {
9255  // There is free space from lastOffset to suballoc.offset.
9256  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9257  inoutStats.unusedSize += unusedRangeSize;
9258  ++inoutStats.unusedRangeCount;
9259  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9260  }
9261 
9262  // 2. Process this allocation.
9263  // There is allocation with suballoc.offset, suballoc.size.
9264  ++inoutStats.allocationCount;
9265 
9266  // 3. Prepare for next iteration.
9267  lastOffset = suballoc.offset + suballoc.size;
9268  --nextAlloc2ndIndex;
9269  }
9270  // We are at the end.
9271  else
9272  {
9273  if(lastOffset < size)
9274  {
9275  // There is free space from lastOffset to size.
9276  const VkDeviceSize unusedRangeSize = size - lastOffset;
9277  inoutStats.unusedSize += unusedRangeSize;
9278  ++inoutStats.unusedRangeCount;
9279  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9280  }
9281 
9282  // End of loop.
9283  lastOffset = size;
9284  }
9285  }
9286  }
9287 }
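// Usage sketch (editorial; vmaGetPoolStats() is this library's public API,
// while allocator and pool are assumed to exist): the statistics gathered
// above surface through:
//
//   VmaPoolStats poolStats = {};
//   vmaGetPoolStats(allocator, pool, &poolStats);
//   // poolStats.size, poolStats.unusedSize, poolStats.allocationCount and
//   // poolStats.unusedRangeSizeMax are now filled for this pool.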
9288 
9289 #if VMA_STATS_STRING_ENABLED
9290 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9291 {
9292  const VkDeviceSize size = GetSize();
9293  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9294  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9295  const size_t suballoc1stCount = suballocations1st.size();
9296  const size_t suballoc2ndCount = suballocations2nd.size();
9297 
9298  // FIRST PASS
9299 
9300  size_t unusedRangeCount = 0;
9301  VkDeviceSize usedBytes = 0;
9302 
9303  VkDeviceSize lastOffset = 0;
9304 
9305  size_t alloc2ndCount = 0;
9306  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9307  {
9308  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9309  size_t nextAlloc2ndIndex = 0;
9310  while(lastOffset < freeSpace2ndTo1stEnd)
9311  {
9312  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9313  while(nextAlloc2ndIndex < suballoc2ndCount &&
9314  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9315  {
9316  ++nextAlloc2ndIndex;
9317  }
9318 
9319  // Found non-null allocation.
9320  if(nextAlloc2ndIndex < suballoc2ndCount)
9321  {
9322  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9323 
9324  // 1. Process free space before this allocation.
9325  if(lastOffset < suballoc.offset)
9326  {
9327  // There is free space from lastOffset to suballoc.offset.
9328  ++unusedRangeCount;
9329  }
9330 
9331  // 2. Process this allocation.
9332  // There is allocation with suballoc.offset, suballoc.size.
9333  ++alloc2ndCount;
9334  usedBytes += suballoc.size;
9335 
9336  // 3. Prepare for next iteration.
9337  lastOffset = suballoc.offset + suballoc.size;
9338  ++nextAlloc2ndIndex;
9339  }
9340  // We are at the end.
9341  else
9342  {
9343  if(lastOffset < freeSpace2ndTo1stEnd)
9344  {
9345  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9346  ++unusedRangeCount;
9347  }
9348 
9349  // End of loop.
9350  lastOffset = freeSpace2ndTo1stEnd;
9351  }
9352  }
9353  }
9354 
9355  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9356  size_t alloc1stCount = 0;
9357  const VkDeviceSize freeSpace1stTo2ndEnd =
9358  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9359  while(lastOffset < freeSpace1stTo2ndEnd)
9360  {
9361  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9362  while(nextAlloc1stIndex < suballoc1stCount &&
9363  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9364  {
9365  ++nextAlloc1stIndex;
9366  }
9367 
9368  // Found non-null allocation.
9369  if(nextAlloc1stIndex < suballoc1stCount)
9370  {
9371  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9372 
9373  // 1. Process free space before this allocation.
9374  if(lastOffset < suballoc.offset)
9375  {
9376  // There is free space from lastOffset to suballoc.offset.
9377  ++unusedRangeCount;
9378  }
9379 
9380  // 2. Process this allocation.
9381  // There is allocation with suballoc.offset, suballoc.size.
9382  ++alloc1stCount;
9383  usedBytes += suballoc.size;
9384 
9385  // 3. Prepare for next iteration.
9386  lastOffset = suballoc.offset + suballoc.size;
9387  ++nextAlloc1stIndex;
9388  }
9389  // We are at the end.
9390  else
9391  {
9392  if(lastOffset < freeSpace1stTo2ndEnd)
9393  {
9394  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9395  ++unusedRangeCount;
9396  }
9397 
9398  // End of loop.
9399  lastOffset = freeSpace1stTo2ndEnd;
9400  }
9401  }
9402 
9403  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9404  {
9405  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9406  while(lastOffset < size)
9407  {
9408  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9409  while(nextAlloc2ndIndex != SIZE_MAX &&
9410  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9411  {
9412  --nextAlloc2ndIndex;
9413  }
9414 
9415  // Found non-null allocation.
9416  if(nextAlloc2ndIndex != SIZE_MAX)
9417  {
9418  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9419 
9420  // 1. Process free space before this allocation.
9421  if(lastOffset < suballoc.offset)
9422  {
9423  // There is free space from lastOffset to suballoc.offset.
9424  ++unusedRangeCount;
9425  }
9426 
9427  // 2. Process this allocation.
9428  // There is allocation with suballoc.offset, suballoc.size.
9429  ++alloc2ndCount;
9430  usedBytes += suballoc.size;
9431 
9432  // 3. Prepare for next iteration.
9433  lastOffset = suballoc.offset + suballoc.size;
9434  --nextAlloc2ndIndex;
9435  }
9436  // We are at the end.
9437  else
9438  {
9439  if(lastOffset < size)
9440  {
9441  // There is free space from lastOffset to size.
9442  ++unusedRangeCount;
9443  }
9444 
9445  // End of loop.
9446  lastOffset = size;
9447  }
9448  }
9449  }
9450 
9451  const VkDeviceSize unusedBytes = size - usedBytes;
9452  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9453 
9454  // SECOND PASS
9455  lastOffset = 0;
9456 
9457  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9458  {
9459  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9460  size_t nextAlloc2ndIndex = 0;
9461  while(lastOffset < freeSpace2ndTo1stEnd)
9462  {
9463  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9464  while(nextAlloc2ndIndex < suballoc2ndCount &&
9465  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9466  {
9467  ++nextAlloc2ndIndex;
9468  }
9469 
9470  // Found non-null allocation.
9471  if(nextAlloc2ndIndex < suballoc2ndCount)
9472  {
9473  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9474 
9475  // 1. Process free space before this allocation.
9476  if(lastOffset < suballoc.offset)
9477  {
9478  // There is free space from lastOffset to suballoc.offset.
9479  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9480  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9481  }
9482 
9483  // 2. Process this allocation.
9484  // There is allocation with suballoc.offset, suballoc.size.
9485  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9486 
9487  // 3. Prepare for next iteration.
9488  lastOffset = suballoc.offset + suballoc.size;
9489  ++nextAlloc2ndIndex;
9490  }
9491  // We are at the end.
9492  else
9493  {
9494  if(lastOffset < freeSpace2ndTo1stEnd)
9495  {
9496  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9497  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9498  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9499  }
9500 
9501  // End of loop.
9502  lastOffset = freeSpace2ndTo1stEnd;
9503  }
9504  }
9505  }
9506 
9507  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9508  while(lastOffset < freeSpace1stTo2ndEnd)
9509  {
9510  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9511  while(nextAlloc1stIndex < suballoc1stCount &&
9512  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9513  {
9514  ++nextAlloc1stIndex;
9515  }
9516 
9517  // Found non-null allocation.
9518  if(nextAlloc1stIndex < suballoc1stCount)
9519  {
9520  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9521 
9522  // 1. Process free space before this allocation.
9523  if(lastOffset < suballoc.offset)
9524  {
9525  // There is free space from lastOffset to suballoc.offset.
9526  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9527  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9528  }
9529 
9530  // 2. Process this allocation.
9531  // There is allocation with suballoc.offset, suballoc.size.
9532  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9533 
9534  // 3. Prepare for next iteration.
9535  lastOffset = suballoc.offset + suballoc.size;
9536  ++nextAlloc1stIndex;
9537  }
9538  // We are at the end.
9539  else
9540  {
9541  if(lastOffset < freeSpace1stTo2ndEnd)
9542  {
9543  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9544  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9545  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9546  }
9547 
9548  // End of loop.
9549  lastOffset = freeSpace1stTo2ndEnd;
9550  }
9551  }
9552 
9553  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9554  {
9555  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9556  while(lastOffset < size)
9557  {
9558  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9559  while(nextAlloc2ndIndex != SIZE_MAX &&
9560  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9561  {
9562  --nextAlloc2ndIndex;
9563  }
9564 
9565  // Found non-null allocation.
9566  if(nextAlloc2ndIndex != SIZE_MAX)
9567  {
9568  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9569 
9570  // 1. Process free space before this allocation.
9571  if(lastOffset < suballoc.offset)
9572  {
9573  // There is free space from lastOffset to suballoc.offset.
9574  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9575  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9576  }
9577 
9578  // 2. Process this allocation.
9579  // There is allocation with suballoc.offset, suballoc.size.
9580  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9581 
9582  // 3. Prepare for next iteration.
9583  lastOffset = suballoc.offset + suballoc.size;
9584  --nextAlloc2ndIndex;
9585  }
9586  // We are at the end.
9587  else
9588  {
9589  if(lastOffset < size)
9590  {
9591  // There is free space from lastOffset to size.
9592  const VkDeviceSize unusedRangeSize = size - lastOffset;
9593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9594  }
9595 
9596  // End of loop.
9597  lastOffset = size;
9598  }
9599  }
9600  }
9601 
9602  PrintDetailedMap_End(json);
9603 }
9604 #endif // #if VMA_STATS_STRING_ENABLED
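// Usage sketch (editorial; both calls are this library's public API): the
// JSON emitted by PrintDetailedMap() above is reachable through the stats
// string:
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE /*detailedMap*/);
//   // ... dump statsString to a file for offline inspection ...
//   vmaFreeStatsString(allocator, statsString);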
9605 
9606 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9607  uint32_t currentFrameIndex,
9608  uint32_t frameInUseCount,
9609  VkDeviceSize bufferImageGranularity,
9610  VkDeviceSize allocSize,
9611  VkDeviceSize allocAlignment,
9612  bool upperAddress,
9613  VmaSuballocationType allocType,
9614  bool canMakeOtherLost,
9615  uint32_t strategy,
9616  VmaAllocationRequest* pAllocationRequest)
9617 {
9618  VMA_ASSERT(allocSize > 0);
9619  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9620  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9621  VMA_HEAVY_ASSERT(Validate());
9622 
9623  const VkDeviceSize size = GetSize();
9624  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9625  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9626 
9627  if(upperAddress)
9628  {
9629  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9630  {
9631  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9632  return false;
9633  }
9634 
9635  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9636  if(allocSize > size)
9637  {
9638  return false;
9639  }
9640  VkDeviceSize resultBaseOffset = size - allocSize;
9641  if(!suballocations2nd.empty())
9642  {
9643  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9644  resultBaseOffset = lastSuballoc.offset - allocSize;
9645  if(allocSize > lastSuballoc.offset)
9646  {
9647  return false;
9648  }
9649  }
9650 
9651  // Start from offset equal to end of free space.
9652  VkDeviceSize resultOffset = resultBaseOffset;
9653 
9654  // Apply VMA_DEBUG_MARGIN at the end.
9655  if(VMA_DEBUG_MARGIN > 0)
9656  {
9657  if(resultOffset < VMA_DEBUG_MARGIN)
9658  {
9659  return false;
9660  }
9661  resultOffset -= VMA_DEBUG_MARGIN;
9662  }
9663 
9664  // Apply alignment.
9665  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9666 
9667  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9668  // Make bigger alignment if necessary.
9669  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9670  {
9671  bool bufferImageGranularityConflict = false;
9672  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9673  {
9674  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9675  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9676  {
9677  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9678  {
9679  bufferImageGranularityConflict = true;
9680  break;
9681  }
9682  }
9683  else
9684  // Already on previous page.
9685  break;
9686  }
9687  if(bufferImageGranularityConflict)
9688  {
9689  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9690  }
9691  }
9692 
9693  // There is enough free space.
9694  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9695  suballocations1st.back().offset + suballocations1st.back().size :
9696  0;
9697  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9698  {
9699  // Check previous suballocations for BufferImageGranularity conflicts.
9700  // If conflict exists, allocation cannot be made here.
9701  if(bufferImageGranularity > 1)
9702  {
9703  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9704  {
9705  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9706  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9707  {
9708  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9709  {
9710  return false;
9711  }
9712  }
9713  else
9714  {
9715  // Already on next page.
9716  break;
9717  }
9718  }
9719  }
9720 
9721  // All tests passed: Success.
9722  pAllocationRequest->offset = resultOffset;
9723  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9724  pAllocationRequest->sumItemSize = 0;
9725  // pAllocationRequest->item unused.
9726  pAllocationRequest->itemsToMakeLostCount = 0;
9727  return true;
9728  }
9729  }
9730  else // !upperAddress
9731  {
9732  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9733  {
9734  // Try to allocate at the end of 1st vector.
9735 
9736  VkDeviceSize resultBaseOffset = 0;
9737  if(!suballocations1st.empty())
9738  {
9739  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9740  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9741  }
9742 
9743  // Start from offset equal to beginning of free space.
9744  VkDeviceSize resultOffset = resultBaseOffset;
9745 
9746  // Apply VMA_DEBUG_MARGIN at the beginning.
9747  if(VMA_DEBUG_MARGIN > 0)
9748  {
9749  resultOffset += VMA_DEBUG_MARGIN;
9750  }
9751 
9752  // Apply alignment.
9753  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9754 
9755  // Check previous suballocations for BufferImageGranularity conflicts.
9756  // Make bigger alignment if necessary.
9757  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9758  {
9759  bool bufferImageGranularityConflict = false;
9760  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9761  {
9762  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9763  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9764  {
9765  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9766  {
9767  bufferImageGranularityConflict = true;
9768  break;
9769  }
9770  }
9771  else
9772  // Already on previous page.
9773  break;
9774  }
9775  if(bufferImageGranularityConflict)
9776  {
9777  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9778  }
9779  }
9780 
9781  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9782  suballocations2nd.back().offset : size;
9783 
9784  // There is enough free space at the end after alignment.
9785  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9786  {
9787  // Check next suballocations for BufferImageGranularity conflicts.
9788  // If conflict exists, allocation cannot be made here.
9789  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9790  {
9791  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9792  {
9793  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9794  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9795  {
9796  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9797  {
9798  return false;
9799  }
9800  }
9801  else
9802  {
9803  // Already on previous page.
9804  break;
9805  }
9806  }
9807  }
9808 
9809  // All tests passed: Success.
9810  pAllocationRequest->offset = resultOffset;
9811  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9812  pAllocationRequest->sumItemSize = 0;
9813  // pAllocationRequest->item unused.
9814  pAllocationRequest->itemsToMakeLostCount = 0;
9815  return true;
9816  }
9817  }
9818 
9819  // Wrap-around to the end of the 2nd vector. Try to allocate there, treating the
9820  // beginning of the 1st vector as the end of the free space.
9821  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9822  {
9823  VMA_ASSERT(!suballocations1st.empty());
9824 
9825  VkDeviceSize resultBaseOffset = 0;
9826  if(!suballocations2nd.empty())
9827  {
9828  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9829  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9830  }
9831 
9832  // Start from offset equal to beginning of free space.
9833  VkDeviceSize resultOffset = resultBaseOffset;
9834 
9835  // Apply VMA_DEBUG_MARGIN at the beginning.
9836  if(VMA_DEBUG_MARGIN > 0)
9837  {
9838  resultOffset += VMA_DEBUG_MARGIN;
9839  }
9840 
9841  // Apply alignment.
9842  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9843 
9844  // Check previous suballocations for BufferImageGranularity conflicts.
9845  // Make bigger alignment if necessary.
9846  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9847  {
9848  bool bufferImageGranularityConflict = false;
9849  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9850  {
9851  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9852  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9853  {
9854  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9855  {
9856  bufferImageGranularityConflict = true;
9857  break;
9858  }
9859  }
9860  else
9861  // Already on previous page.
9862  break;
9863  }
9864  if(bufferImageGranularityConflict)
9865  {
9866  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9867  }
9868  }
9869 
9870  pAllocationRequest->itemsToMakeLostCount = 0;
9871  pAllocationRequest->sumItemSize = 0;
9872  size_t index1st = m_1stNullItemsBeginCount;
9873 
9874  if(canMakeOtherLost)
9875  {
9876  while(index1st < suballocations1st.size() &&
9877  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9878  {
9879  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9880  const VmaSuballocation& suballoc = suballocations1st[index1st];
9881  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9882  {
9883  // No problem.
9884  }
9885  else
9886  {
9887  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9888  if(suballoc.hAllocation->CanBecomeLost() &&
9889  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9890  {
9891  ++pAllocationRequest->itemsToMakeLostCount;
9892  pAllocationRequest->sumItemSize += suballoc.size;
9893  }
9894  else
9895  {
9896  return false;
9897  }
9898  }
9899  ++index1st;
9900  }
9901 
9902  // Check next suballocations for BufferImageGranularity conflicts.
9903  // If conflict exists, we must mark more allocations lost or fail.
9904  if(bufferImageGranularity > 1)
9905  {
9906  while(index1st < suballocations1st.size())
9907  {
9908  const VmaSuballocation& suballoc = suballocations1st[index1st];
9909  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9910  {
9911  if(suballoc.hAllocation != VK_NULL_HANDLE)
9912  {
9913  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9914  if(suballoc.hAllocation->CanBecomeLost() &&
9915  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9916  {
9917  ++pAllocationRequest->itemsToMakeLostCount;
9918  pAllocationRequest->sumItemSize += suballoc.size;
9919  }
9920  else
9921  {
9922  return false;
9923  }
9924  }
9925  }
9926  else
9927  {
9928  // Already on next page.
9929  break;
9930  }
9931  ++index1st;
9932  }
9933  }
9934  }
9935 
9936  // There is enough free space at the end after alignment.
9937  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9938  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9939  {
9940  // Check next suballocations for BufferImageGranularity conflicts.
9941  // If conflict exists, allocation cannot be made here.
9942  if(bufferImageGranularity > 1)
9943  {
9944  for(size_t nextSuballocIndex = index1st;
9945  nextSuballocIndex < suballocations1st.size();
9946  nextSuballocIndex++)
9947  {
9948  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9949  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9950  {
9951  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9952  {
9953  return false;
9954  }
9955  }
9956  else
9957  {
9958  // Already on next page.
9959  break;
9960  }
9961  }
9962  }
9963 
9964  // All tests passed: Success.
9965  pAllocationRequest->offset = resultOffset;
9966  pAllocationRequest->sumFreeSize =
9967  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9968  - resultBaseOffset
9969  - pAllocationRequest->sumItemSize;
9970  // pAllocationRequest->item unused.
9971  return true;
9972  }
9973  }
9974  }
9975 
9976  return false;
9977 }
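// Usage sketch (editorial; the flag and calls are this library's public API,
// while bufCreateInfo, pool and allocator are assumed to exist): the
// upperAddress branch above serves double-stack allocations, requested like:
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.pool = pool; // created with the linear algorithm
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
//   VkBuffer buf;
//   VmaAllocation alloc;
//   vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//       &buf, &alloc, VMA_NULL);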
9978 
9979 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9980  uint32_t currentFrameIndex,
9981  uint32_t frameInUseCount,
9982  VmaAllocationRequest* pAllocationRequest)
9983 {
9984  if(pAllocationRequest->itemsToMakeLostCount == 0)
9985  {
9986  return true;
9987  }
9988 
9989  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9990 
9991  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9992  size_t index1st = m_1stNullItemsBeginCount;
9993  size_t madeLostCount = 0;
9994  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9995  {
9996  VMA_ASSERT(index1st < suballocations1st.size());
9997  VmaSuballocation& suballoc = suballocations1st[index1st];
9998  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9999  {
10000  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10001  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10002  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10003  {
10004  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10005  suballoc.hAllocation = VK_NULL_HANDLE;
10006  m_SumFreeSize += suballoc.size;
10007  ++m_1stNullItemsMiddleCount;
10008  ++madeLostCount;
10009  }
10010  else
10011  {
10012  return false;
10013  }
10014  }
10015  ++index1st;
10016  }
10017 
10018  CleanupAfterFree();
10019  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10020 
10021  return true;
10022 }
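/*
Illustrative sketch (not part of the library source): the per-frame protocol that
makes GetLastUseFrameIndex()/MakeLost() above meaningful. `allocator`, `alloc` and
`frameIndex` are assumed caller-side names.

    // Once per frame, advance the allocator's frame counter:
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    // Before using an allocation created with CAN_BECOME_LOST, touch it.
    // This updates its last-use frame index; VK_FALSE means it was made lost.
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation is lost: destroy the resource and recreate it.
    }
*/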
10023 
10024 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10025 {
10026  uint32_t lostAllocationCount = 0;
10027 
10028  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10029  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10030  {
10031  VmaSuballocation& suballoc = suballocations1st[i];
10032  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10033  suballoc.hAllocation->CanBecomeLost() &&
10034  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10035  {
10036  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10037  suballoc.hAllocation = VK_NULL_HANDLE;
10038  ++m_1stNullItemsMiddleCount;
10039  m_SumFreeSize += suballoc.size;
10040  ++lostAllocationCount;
10041  }
10042  }
10043 
10044  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10045  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10046  {
10047  VmaSuballocation& suballoc = suballocations2nd[i];
10048  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10049  suballoc.hAllocation->CanBecomeLost() &&
10050  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10051  {
10052  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10053  suballoc.hAllocation = VK_NULL_HANDLE;
10054  ++m_2ndNullItemsCount;
10055  ++lostAllocationCount;
10056  }
10057  }
10058 
10059  if(lostAllocationCount)
10060  {
10061  CleanupAfterFree();
10062  }
10063 
10064  return lostAllocationCount;
10065 }
10066 
10067 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10068 {
10069  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10070  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10071  {
10072  const VmaSuballocation& suballoc = suballocations1st[i];
10073  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10074  {
10075  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10076  {
10077  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10078  return VK_ERROR_VALIDATION_FAILED_EXT;
10079  }
10080  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10081  {
10082  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10083  return VK_ERROR_VALIDATION_FAILED_EXT;
10084  }
10085  }
10086  }
10087 
10088  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10089  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10090  {
10091  const VmaSuballocation& suballoc = suballocations2nd[i];
10092  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10093  {
10094  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10095  {
10096  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10097  return VK_ERROR_VALIDATION_FAILED_EXT;
10098  }
10099  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10100  {
10101  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10102  return VK_ERROR_VALIDATION_FAILED_EXT;
10103  }
10104  }
10105  }
10106 
10107  return VK_SUCCESS;
10108 }
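/*
Illustrative sketch (not part of the library source): the magic values validated by
CheckCorruption() above exist only when the library is compiled with a nonzero debug
margin and corruption detection enabled, e.g.:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    // Later, validate the margins of all allocations in all memory types that are
    // HOST_VISIBLE and HOST_COHERENT (UINT32_MAX = consider every memory type):
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
*/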
10109 
10110 void VmaBlockMetadata_Linear::Alloc(
10111  const VmaAllocationRequest& request,
10112  VmaSuballocationType type,
10113  VkDeviceSize allocSize,
10114  bool upperAddress,
10115  VmaAllocation hAllocation)
10116 {
10117  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10118 
10119  if(upperAddress)
10120  {
10121  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10122  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10123  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10124  suballocations2nd.push_back(newSuballoc);
10125  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10126  }
10127  else
10128  {
10129  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10130 
10131  // First allocation.
10132  if(suballocations1st.empty())
10133  {
10134  suballocations1st.push_back(newSuballoc);
10135  }
10136  else
10137  {
10138  // New allocation at the end of 1st vector.
10139  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10140  {
10141  // Check if it fits before the end of the block.
10142  VMA_ASSERT(request.offset + allocSize <= GetSize());
10143  suballocations1st.push_back(newSuballoc);
10144  }
10145  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10146  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10147  {
10148  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10149 
10150  switch(m_2ndVectorMode)
10151  {
10152  case SECOND_VECTOR_EMPTY:
10153  // First allocation from second part ring buffer.
10154  VMA_ASSERT(suballocations2nd.empty());
10155  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10156  break;
10157  case SECOND_VECTOR_RING_BUFFER:
10158  // 2-part ring buffer is already started.
10159  VMA_ASSERT(!suballocations2nd.empty());
10160  break;
10161  case SECOND_VECTOR_DOUBLE_STACK:
10162  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10163  break;
10164  default:
10165  VMA_ASSERT(0);
10166  }
10167 
10168  suballocations2nd.push_back(newSuballoc);
10169  }
10170  else
10171  {
10172  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10173  }
10174  }
10175  }
10176 
10177  m_SumFreeSize -= newSuballoc.size;
10178 }
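/*
Illustrative sketch (not part of the library source): using the double-stack path of
Alloc() above. Upper-address allocations require a custom pool with the linear
algorithm and a single block; `allocator` and `memTypeIndex` are assumed.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 1; // upper address works only within a single block

    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    // Allocate from the top of the block downwards (2nd vector, DOUBLE_STACK mode):
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
*/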
10179 
10180 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10181 {
10182  FreeAtOffset(allocation->GetOffset());
10183 }
10184 
10185 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10186 {
10187  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10188  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10189 
10190  if(!suballocations1st.empty())
10191  {
10192  // Freed item is the first allocation: mark it free and extend the null prefix at the beginning.
10193  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10194  if(firstSuballoc.offset == offset)
10195  {
10196  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10197  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10198  m_SumFreeSize += firstSuballoc.size;
10199  ++m_1stNullItemsBeginCount;
10200  CleanupAfterFree();
10201  return;
10202  }
10203  }
10204 
10205  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10206  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10207  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10208  {
10209  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10210  if(lastSuballoc.offset == offset)
10211  {
10212  m_SumFreeSize += lastSuballoc.size;
10213  suballocations2nd.pop_back();
10214  CleanupAfterFree();
10215  return;
10216  }
10217  }
10218  // Last allocation in 1st vector.
10219  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10220  {
10221  VmaSuballocation& lastSuballoc = suballocations1st.back();
10222  if(lastSuballoc.offset == offset)
10223  {
10224  m_SumFreeSize += lastSuballoc.size;
10225  suballocations1st.pop_back();
10226  CleanupAfterFree();
10227  return;
10228  }
10229  }
10230 
10231  // Item from the middle of 1st vector.
10232  {
10233  VmaSuballocation refSuballoc;
10234  refSuballoc.offset = offset;
10235  // Rest of the members intentionally left uninitialized for better performance.
10236  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10237  suballocations1st.begin() + m_1stNullItemsBeginCount,
10238  suballocations1st.end(),
10239  refSuballoc);
10240  if(it != suballocations1st.end())
10241  {
10242  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10243  it->hAllocation = VK_NULL_HANDLE;
10244  ++m_1stNullItemsMiddleCount;
10245  m_SumFreeSize += it->size;
10246  CleanupAfterFree();
10247  return;
10248  }
10249  }
10250 
10251  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10252  {
10253  // Item from the middle of 2nd vector.
10254  VmaSuballocation refSuballoc;
10255  refSuballoc.offset = offset;
10256  // Rest of the members intentionally left uninitialized for better performance.
10257  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10258  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10259  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10260  if(it != suballocations2nd.end())
10261  {
10262  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10263  it->hAllocation = VK_NULL_HANDLE;
10264  ++m_2ndNullItemsCount;
10265  m_SumFreeSize += it->size;
10266  CleanupAfterFree();
10267  return;
10268  }
10269  }
10270 
10271  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10272 }
10273 
10274 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10275 {
10276  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10277  const size_t suballocCount = AccessSuballocations1st().size();
10278  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10279 }
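/*
Worked example of the heuristic above: with suballocCount = 40 and nullItemCount = 24,
24 * 2 = 48 >= (40 - 24) * 3 = 48, so compaction runs. In other words, the 1st vector
is compacted once free (null) items reach 3/2 of the remaining live items, and only
when there are more than 32 items in total, so tiny vectors are never compacted.
*/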
10280 
10281 void VmaBlockMetadata_Linear::CleanupAfterFree()
10282 {
10283  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10284  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10285 
10286  if(IsEmpty())
10287  {
10288  suballocations1st.clear();
10289  suballocations2nd.clear();
10290  m_1stNullItemsBeginCount = 0;
10291  m_1stNullItemsMiddleCount = 0;
10292  m_2ndNullItemsCount = 0;
10293  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10294  }
10295  else
10296  {
10297  const size_t suballoc1stCount = suballocations1st.size();
10298  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10299  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10300 
10301  // Find more null items at the beginning of 1st vector.
10302  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10303  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10304  {
10305  ++m_1stNullItemsBeginCount;
10306  --m_1stNullItemsMiddleCount;
10307  }
10308 
10309  // Find more null items at the end of 1st vector.
10310  while(m_1stNullItemsMiddleCount > 0 &&
10311  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10312  {
10313  --m_1stNullItemsMiddleCount;
10314  suballocations1st.pop_back();
10315  }
10316 
10317  // Find more null items at the end of 2nd vector.
10318  while(m_2ndNullItemsCount > 0 &&
10319  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10320  {
10321  --m_2ndNullItemsCount;
10322  suballocations2nd.pop_back();
10323  }
10324 
10325  if(ShouldCompact1st())
10326  {
10327  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10328  size_t srcIndex = m_1stNullItemsBeginCount;
10329  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10330  {
10331  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10332  {
10333  ++srcIndex;
10334  }
10335  if(dstIndex != srcIndex)
10336  {
10337  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10338  }
10339  ++srcIndex;
10340  }
10341  suballocations1st.resize(nonNullItemCount);
10342  m_1stNullItemsBeginCount = 0;
10343  m_1stNullItemsMiddleCount = 0;
10344  }
10345 
10346  // 2nd vector became empty.
10347  if(suballocations2nd.empty())
10348  {
10349  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10350  }
10351 
10352  // 1st vector became empty.
10353  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10354  {
10355  suballocations1st.clear();
10356  m_1stNullItemsBeginCount = 0;
10357 
10358  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10359  {
10360  // Swap 1st with 2nd. Now 2nd is empty.
10361  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10362  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10363  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10364  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10365  {
10366  ++m_1stNullItemsBeginCount;
10367  --m_1stNullItemsMiddleCount;
10368  }
10369  m_2ndNullItemsCount = 0;
10370  m_1stVectorIndex ^= 1;
10371  }
10372  }
10373  }
10374 
10375  VMA_HEAVY_ASSERT(Validate());
10376 }
10377 
10378 
10379 ////////////////////////////////////////////////////////////////////////////////
10380 // class VmaBlockMetadata_Buddy
10381 
10382 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10383  VmaBlockMetadata(hAllocator),
10384  m_Root(VMA_NULL),
10385  m_AllocationCount(0),
10386  m_FreeCount(1),
10387  m_SumFreeSize(0)
10388 {
10389  memset(m_FreeList, 0, sizeof(m_FreeList));
10390 }
10391 
10392 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10393 {
10394  DeleteNode(m_Root);
10395 }
10396 
10397 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10398 {
10399  VmaBlockMetadata::Init(size);
10400 
10401  m_UsableSize = VmaPrevPow2(size);
10402  m_SumFreeSize = m_UsableSize;
10403 
10404  // Calculate m_LevelCount.
10405  m_LevelCount = 1;
10406  while(m_LevelCount < MAX_LEVELS &&
10407  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10408  {
10409  ++m_LevelCount;
10410  }
10411 
10412  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10413  rootNode->offset = 0;
10414  rootNode->type = Node::TYPE_FREE;
10415  rootNode->parent = VMA_NULL;
10416  rootNode->buddy = VMA_NULL;
10417 
10418  m_Root = rootNode;
10419  AddToFreeListFront(0, rootNode);
10420 }
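/*
Worked example for Init() above, assuming LevelToNodeSize(level) == m_UsableSize >> level:
Init(100 MiB) sets m_UsableSize = VmaPrevPow2(100 MiB) = 64 MiB, so node sizes per level
are 64, 32, 16, ... MiB, halving until MIN_NODE_SIZE or MAX_LEVELS stops the loop.
The remaining 36 MiB tail is never allocated from; it is reported via GetUnusableSize().
*/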
10421 
10422 bool VmaBlockMetadata_Buddy::Validate() const
10423 {
10424  // Validate tree.
10425  ValidationContext ctx;
10426  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10427  {
10428  VMA_VALIDATE(false && "ValidateNode failed.");
10429  }
10430  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10431  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10432 
10433  // Validate free node lists.
10434  for(uint32_t level = 0; level < m_LevelCount; ++level)
10435  {
10436  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10437  m_FreeList[level].front->free.prev == VMA_NULL);
10438 
10439  for(Node* node = m_FreeList[level].front;
10440  node != VMA_NULL;
10441  node = node->free.next)
10442  {
10443  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10444 
10445  if(node->free.next == VMA_NULL)
10446  {
10447  VMA_VALIDATE(m_FreeList[level].back == node);
10448  }
10449  else
10450  {
10451  VMA_VALIDATE(node->free.next->free.prev == node);
10452  }
10453  }
10454  }
10455 
10456  // Validate that free lists at higher levels are empty.
10457  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10458  {
10459  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10460  }
10461 
10462  return true;
10463 }
10464 
10465 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10466 {
10467  for(uint32_t level = 0; level < m_LevelCount; ++level)
10468  {
10469  if(m_FreeList[level].front != VMA_NULL)
10470  {
10471  return LevelToNodeSize(level);
10472  }
10473  }
10474  return 0;
10475 }
10476 
10477 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10478 {
10479  const VkDeviceSize unusableSize = GetUnusableSize();
10480 
10481  outInfo.blockCount = 1;
10482 
10483  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10484  outInfo.usedBytes = outInfo.unusedBytes = 0;
10485 
10486  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10487  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10488  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10489 
10490  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10491 
10492  if(unusableSize > 0)
10493  {
10494  ++outInfo.unusedRangeCount;
10495  outInfo.unusedBytes += unusableSize;
10496  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10497  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10498  }
10499 }
10500 
10501 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10502 {
10503  const VkDeviceSize unusableSize = GetUnusableSize();
10504 
10505  inoutStats.size += GetSize();
10506  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10507  inoutStats.allocationCount += m_AllocationCount;
10508  inoutStats.unusedRangeCount += m_FreeCount;
10509  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10510 
10511  if(unusableSize > 0)
10512  {
10513  ++inoutStats.unusedRangeCount;
10514  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10515  }
10516 }
10517 
10518 #if VMA_STATS_STRING_ENABLED
10519 
10520 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10521 {
10522  // TODO optimize
10523  VmaStatInfo stat;
10524  CalcAllocationStatInfo(stat);
10525 
10526  PrintDetailedMap_Begin(
10527  json,
10528  stat.unusedBytes,
10529  stat.allocationCount,
10530  stat.unusedRangeCount);
10531 
10532  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10533 
10534  const VkDeviceSize unusableSize = GetUnusableSize();
10535  if(unusableSize > 0)
10536  {
10537  PrintDetailedMap_UnusedRange(json,
10538  m_UsableSize, // offset
10539  unusableSize); // size
10540  }
10541 
10542  PrintDetailedMap_End(json);
10543 }
10544 
10545 #endif // #if VMA_STATS_STRING_ENABLED
10546 
10547 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10548  uint32_t currentFrameIndex,
10549  uint32_t frameInUseCount,
10550  VkDeviceSize bufferImageGranularity,
10551  VkDeviceSize allocSize,
10552  VkDeviceSize allocAlignment,
10553  bool upperAddress,
10554  VmaSuballocationType allocType,
10555  bool canMakeOtherLost,
10556  uint32_t strategy,
10557  VmaAllocationRequest* pAllocationRequest)
10558 {
10559  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10560 
10561  // Simple way to respect bufferImageGranularity. May be optimized someday.
10562  // Whenever the allocation might turn out to be an OPTIMAL image, inflate its alignment and size up to the granularity:
10563  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10564  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10565  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10566  {
10567  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10568  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10569  }
10570 
10571  if(allocSize > m_UsableSize)
10572  {
10573  return false;
10574  }
10575 
10576  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10577  for(uint32_t level = targetLevel + 1; level--; )
10578  {
10579  for(Node* freeNode = m_FreeList[level].front;
10580  freeNode != VMA_NULL;
10581  freeNode = freeNode->free.next)
10582  {
10583  if(freeNode->offset % allocAlignment == 0)
10584  {
10585  pAllocationRequest->offset = freeNode->offset;
10586  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10587  pAllocationRequest->sumItemSize = 0;
10588  pAllocationRequest->itemsToMakeLostCount = 0;
10589  pAllocationRequest->customData = (void*)(uintptr_t)level;
10590  return true;
10591  }
10592  }
10593  }
10594 
10595  return false;
10596 }
10597 
10598 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10599  uint32_t currentFrameIndex,
10600  uint32_t frameInUseCount,
10601  VmaAllocationRequest* pAllocationRequest)
10602 {
10603  /*
10604  Lost allocations are not supported in buddy allocator at the moment.
10605  Support might be added in the future.
10606  */
10607  return pAllocationRequest->itemsToMakeLostCount == 0;
10608 }
10609 
10610 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10611 {
10612  /*
10613  Lost allocations are not supported in buddy allocator at the moment.
10614  Support might be added in the future.
10615  */
10616  return 0;
10617 }
10618 
10619 void VmaBlockMetadata_Buddy::Alloc(
10620  const VmaAllocationRequest& request,
10621  VmaSuballocationType type,
10622  VkDeviceSize allocSize,
10623  bool upperAddress,
10624  VmaAllocation hAllocation)
10625 {
10626  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10627  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10628 
10629  Node* currNode = m_FreeList[currLevel].front;
10630  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10631  while(currNode->offset != request.offset)
10632  {
10633  currNode = currNode->free.next;
10634  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10635  }
10636 
10637  // Go down, splitting free nodes.
10638  while(currLevel < targetLevel)
10639  {
10640  // currNode is already first free node at currLevel.
10641  // Remove it from list of free nodes at this currLevel.
10642  RemoveFromFreeList(currLevel, currNode);
10643 
10644  const uint32_t childrenLevel = currLevel + 1;
10645 
10646  // Create two free sub-nodes.
10647  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10648  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10649 
10650  leftChild->offset = currNode->offset;
10651  leftChild->type = Node::TYPE_FREE;
10652  leftChild->parent = currNode;
10653  leftChild->buddy = rightChild;
10654 
10655  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10656  rightChild->type = Node::TYPE_FREE;
10657  rightChild->parent = currNode;
10658  rightChild->buddy = leftChild;
10659 
10660  // Convert current currNode to split type.
10661  currNode->type = Node::TYPE_SPLIT;
10662  currNode->split.leftChild = leftChild;
10663 
10664  // Add child nodes to free list. Order is important!
10665  AddToFreeListFront(childrenLevel, rightChild);
10666  AddToFreeListFront(childrenLevel, leftChild);
10667 
10668  ++m_FreeCount;
10669  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10670  ++currLevel;
10671  currNode = m_FreeList[currLevel].front;
10672 
10673  /*
10674  We can be sure that currNode, as left child of node previously split,
10675  also fulfills the alignment requirement.
10676  */
10677  }
10678 
10679  // Remove from free list.
10680  VMA_ASSERT(currLevel == targetLevel &&
10681  currNode != VMA_NULL &&
10682  currNode->type == Node::TYPE_FREE);
10683  RemoveFromFreeList(currLevel, currNode);
10684 
10685  // Convert to allocation node.
10686  currNode->type = Node::TYPE_ALLOCATION;
10687  currNode->allocation.alloc = hAllocation;
10688 
10689  ++m_AllocationCount;
10690  --m_FreeCount;
10691  m_SumFreeSize -= allocSize;
10692 }
10693 
10694 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10695 {
10696  if(node->type == Node::TYPE_SPLIT)
10697  {
10698  DeleteNode(node->split.leftChild->buddy);
10699  DeleteNode(node->split.leftChild);
10700  }
10701 
10702  vma_delete(GetAllocationCallbacks(), node);
10703 }
10704 
10705 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10706 {
10707  VMA_VALIDATE(level < m_LevelCount);
10708  VMA_VALIDATE(curr->parent == parent);
10709  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10710  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10711  switch(curr->type)
10712  {
10713  case Node::TYPE_FREE:
10714  // curr->free.prev, next are validated separately.
10715  ctx.calculatedSumFreeSize += levelNodeSize;
10716  ++ctx.calculatedFreeCount;
10717  break;
10718  case Node::TYPE_ALLOCATION:
10719  ++ctx.calculatedAllocationCount;
10720  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10721  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10722  break;
10723  case Node::TYPE_SPLIT:
10724  {
10725  const uint32_t childrenLevel = level + 1;
10726  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10727  const Node* const leftChild = curr->split.leftChild;
10728  VMA_VALIDATE(leftChild != VMA_NULL);
10729  VMA_VALIDATE(leftChild->offset == curr->offset);
10730  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10731  {
10732  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10733  }
10734  const Node* const rightChild = leftChild->buddy;
10735  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10736  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10737  {
10738  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10739  }
10740  }
10741  break;
10742  default:
10743  return false;
10744  }
10745 
10746  return true;
10747 }
10748 
10749 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10750 {
10751  // I know this could be optimized somehow e.g. by using std::bit_width (formerly proposed as std::log2p1) from C++20.
10752  uint32_t level = 0;
10753  VkDeviceSize currLevelNodeSize = m_UsableSize;
10754  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10755  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10756  {
10757  ++level;
10758  currLevelNodeSize = nextLevelNodeSize;
10759  nextLevelNodeSize = currLevelNodeSize >> 1;
10760  }
10761  return level;
10762 }
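/*
Worked example for AllocSizeToLevel() above, with m_UsableSize = 64 MiB:
allocSize = 3 MiB descends while it still fits in the next (halved) node size:
64 -> 32 -> 16 -> 8 -> 4 MiB, stopping at level 4 because 3 MiB > 2 MiB.
The allocation therefore occupies a 4 MiB node, wasting 1 MiB inside that node.
*/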
10763 
10764 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10765 {
10766  // Find node and level.
10767  Node* node = m_Root;
10768  VkDeviceSize nodeOffset = 0;
10769  uint32_t level = 0;
10770  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10771  while(node->type == Node::TYPE_SPLIT)
10772  {
10773  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10774  if(offset < nodeOffset + nextLevelSize)
10775  {
10776  node = node->split.leftChild;
10777  }
10778  else
10779  {
10780  node = node->split.leftChild->buddy;
10781  nodeOffset += nextLevelSize;
10782  }
10783  ++level;
10784  levelNodeSize = nextLevelSize;
10785  }
10786 
10787  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10788  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10789 
10790  ++m_FreeCount;
10791  --m_AllocationCount;
10792  m_SumFreeSize += alloc->GetSize();
10793 
10794  node->type = Node::TYPE_FREE;
10795 
10796  // Join free nodes if possible.
10797  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10798  {
10799  RemoveFromFreeList(level, node->buddy);
10800  Node* const parent = node->parent;
10801 
10802  vma_delete(GetAllocationCallbacks(), node->buddy);
10803  vma_delete(GetAllocationCallbacks(), node);
10804  parent->type = Node::TYPE_FREE;
10805 
10806  node = parent;
10807  --level;
10808  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10809  --m_FreeCount;
10810  }
10811 
10812  AddToFreeListFront(level, node);
10813 }
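/*
Sketch of the merge loop above on a small tree (sizes in arbitrary units):

    before freeing A:   [        64        ]
                        [   32   ][  32+  ]      + = allocated elsewhere
                        [16][A:16]

Freeing A leaves both children of [32] free, so both child nodes are deleted and
their parent [32] becomes a single free node; the loop stops there because the
parent's buddy [32+] is still allocated. The surviving node is then pushed to the
front of its level's free list.
*/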
10814 
10815 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10816 {
10817  switch(node->type)
10818  {
10819  case Node::TYPE_FREE:
10820  ++outInfo.unusedRangeCount;
10821  outInfo.unusedBytes += levelNodeSize;
10822  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10823  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10824  break;
10825  case Node::TYPE_ALLOCATION:
10826  {
10827  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10828  ++outInfo.allocationCount;
10829  outInfo.usedBytes += allocSize;
10830  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10831  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10832 
10833  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10834  if(unusedRangeSize > 0)
10835  {
10836  ++outInfo.unusedRangeCount;
10837  outInfo.unusedBytes += unusedRangeSize;
10838  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10839  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10840  }
10841  }
10842  break;
10843  case Node::TYPE_SPLIT:
10844  {
10845  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10846  const Node* const leftChild = node->split.leftChild;
10847  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10848  const Node* const rightChild = leftChild->buddy;
10849  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10850  }
10851  break;
10852  default:
10853  VMA_ASSERT(0);
10854  }
10855 }
10856 
10857 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10858 {
10859  VMA_ASSERT(node->type == Node::TYPE_FREE);
10860 
10861  // List is empty.
10862  Node* const frontNode = m_FreeList[level].front;
10863  if(frontNode == VMA_NULL)
10864  {
10865  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10866  node->free.prev = node->free.next = VMA_NULL;
10867  m_FreeList[level].front = m_FreeList[level].back = node;
10868  }
10869  else
10870  {
10871  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10872  node->free.prev = VMA_NULL;
10873  node->free.next = frontNode;
10874  frontNode->free.prev = node;
10875  m_FreeList[level].front = node;
10876  }
10877 }
10878 
10879 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10880 {
10881  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10882 
10883  // It is at the front.
10884  if(node->free.prev == VMA_NULL)
10885  {
10886  VMA_ASSERT(m_FreeList[level].front == node);
10887  m_FreeList[level].front = node->free.next;
10888  }
10889  else
10890  {
10891  Node* const prevFreeNode = node->free.prev;
10892  VMA_ASSERT(prevFreeNode->free.next == node);
10893  prevFreeNode->free.next = node->free.next;
10894  }
10895 
10896  // It is at the back.
10897  if(node->free.next == VMA_NULL)
10898  {
10899  VMA_ASSERT(m_FreeList[level].back == node);
10900  m_FreeList[level].back = node->free.prev;
10901  }
10902  else
10903  {
10904  Node* const nextFreeNode = node->free.next;
10905  VMA_ASSERT(nextFreeNode->free.prev == node);
10906  nextFreeNode->free.prev = node->free.prev;
10907  }
10908 }
10909 
10910 #if VMA_STATS_STRING_ENABLED
10911 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10912 {
10913  switch(node->type)
10914  {
10915  case Node::TYPE_FREE:
10916  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10917  break;
10918  case Node::TYPE_ALLOCATION:
10919  {
10920  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10921  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10922  if(allocSize < levelNodeSize)
10923  {
10924  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10925  }
10926  }
10927  break;
10928  case Node::TYPE_SPLIT:
10929  {
10930  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10931  const Node* const leftChild = node->split.leftChild;
10932  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10933  const Node* const rightChild = leftChild->buddy;
10934  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10935  }
10936  break;
10937  default:
10938  VMA_ASSERT(0);
10939  }
10940 }
10941 #endif // #if VMA_STATS_STRING_ENABLED
10942 
10943 
10944 ////////////////////////////////////////////////////////////////////////////////
10945 // class VmaDeviceMemoryBlock
10946 
10947 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10948  m_pMetadata(VMA_NULL),
10949  m_MemoryTypeIndex(UINT32_MAX),
10950  m_Id(0),
10951  m_hMemory(VK_NULL_HANDLE),
10952  m_MapCount(0),
10953  m_pMappedData(VMA_NULL)
10954 {
10955 }
10956 
10957 void VmaDeviceMemoryBlock::Init(
10958  VmaAllocator hAllocator,
10959  uint32_t newMemoryTypeIndex,
10960  VkDeviceMemory newMemory,
10961  VkDeviceSize newSize,
10962  uint32_t id,
10963  uint32_t algorithm)
10964 {
10965  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10966 
10967  m_MemoryTypeIndex = newMemoryTypeIndex;
10968  m_Id = id;
10969  m_hMemory = newMemory;
10970 
10971  switch(algorithm)
10972  {
10973  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10974  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10975  break;
10976  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10977  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10978  break;
10979  default:
10980  VMA_ASSERT(0);
10981  // Fall-through.
10982  case 0:
10983  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10984  }
10985  m_pMetadata->Init(newSize);
10986 }
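/*
Illustrative sketch (not part of the library source): the `algorithm` parameter above
comes from pool creation flags. For example, a client selects the buddy metadata with:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // assumed, e.g. from vmaFindMemoryTypeIndex()
    poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // a power of 2 avoids an unusable tail

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/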
10987 
10988 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10989 {
10990  // This is the most important assert in the entire library.
10991  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10992  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10993 
10994  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10995  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10996  m_hMemory = VK_NULL_HANDLE;
10997 
10998  vma_delete(allocator, m_pMetadata);
10999  m_pMetadata = VMA_NULL;
11000 }
11001 
11002 bool VmaDeviceMemoryBlock::Validate() const
11003 {
11004  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11005  (m_pMetadata->GetSize() != 0));
11006 
11007  return m_pMetadata->Validate();
11008 }
11009 
11010 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11011 {
11012  void* pData = nullptr;
11013  VkResult res = Map(hAllocator, 1, &pData);
11014  if(res != VK_SUCCESS)
11015  {
11016  return res;
11017  }
11018 
11019  res = m_pMetadata->CheckCorruption(pData);
11020 
11021  Unmap(hAllocator, 1);
11022 
11023  return res;
11024 }
11025 
11026 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11027 {
11028  if(count == 0)
11029  {
11030  return VK_SUCCESS;
11031  }
11032 
11033  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11034  if(m_MapCount != 0)
11035  {
11036  m_MapCount += count;
11037  VMA_ASSERT(m_pMappedData != VMA_NULL);
11038  if(ppData != VMA_NULL)
11039  {
11040  *ppData = m_pMappedData;
11041  }
11042  return VK_SUCCESS;
11043  }
11044  else
11045  {
11046  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11047  hAllocator->m_hDevice,
11048  m_hMemory,
11049  0, // offset
11050  VK_WHOLE_SIZE,
11051  0, // flags
11052  &m_pMappedData);
11053  if(result == VK_SUCCESS)
11054  {
11055  if(ppData != VMA_NULL)
11056  {
11057  *ppData = m_pMappedData;
11058  }
11059  m_MapCount = count;
11060  }
11061  return result;
11062  }
11063 }
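/*
Illustrative sketch (not part of the library source): the reference counting in
Map()/Unmap() above makes nested mapping of the same allocation cheap and idempotent:

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    vmaMapMemory(allocator, alloc, &p1); // vkMapMemory called, m_MapCount = 1
    vmaMapMemory(allocator, alloc, &p2); // m_MapCount = 2, p2 == p1
    vmaUnmapMemory(allocator, alloc);    // m_MapCount = 1, memory stays mapped
    vmaUnmapMemory(allocator, alloc);    // m_MapCount = 0, vkUnmapMemory called
*/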
11064 
11065 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11066 {
11067  if(count == 0)
11068  {
11069  return;
11070  }
11071 
11072  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11073  if(m_MapCount >= count)
11074  {
11075  m_MapCount -= count;
11076  if(m_MapCount == 0)
11077  {
11078  m_pMappedData = VMA_NULL;
11079  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11080  }
11081  }
11082  else
11083  {
11084  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11085  }
11086 }
11087 
11088 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11089 {
11090  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11091  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11092 
11093  void* pData;
11094  VkResult res = Map(hAllocator, 1, &pData);
11095  if(res != VK_SUCCESS)
11096  {
11097  return res;
11098  }
11099 
11100  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11101  VmaWriteMagicValue(pData, allocOffset + allocSize);
11102 
11103  Unmap(hAllocator, 1);
11104 
11105  return VK_SUCCESS;
11106 }
11107 
11108 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11109 {
11110  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11111  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11112 
11113  void* pData;
11114  VkResult res = Map(hAllocator, 1, &pData);
11115  if(res != VK_SUCCESS)
11116  {
11117  return res;
11118  }
11119 
11120  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11121  {
11122  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11123  }
11124  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11125  {
11126  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11127  }
11128 
11129  Unmap(hAllocator, 1);
11130 
11131  return VK_SUCCESS;
11132 }
11133 
11134 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11135  const VmaAllocator hAllocator,
11136  const VmaAllocation hAllocation,
11137  VkBuffer hBuffer)
11138 {
11139  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11140  hAllocation->GetBlock() == this);
11141  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11142  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11143  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11144  hAllocator->m_hDevice,
11145  hBuffer,
11146  m_hMemory,
11147  hAllocation->GetOffset());
11148 }
11149 
11150 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11151  const VmaAllocator hAllocator,
11152  const VmaAllocation hAllocation,
11153  VkImage hImage)
11154 {
11155  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11156  hAllocation->GetBlock() == this);
11157  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11158  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11159  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11160  hAllocator->m_hDevice,
11161  hImage,
11162  m_hMemory,
11163  hAllocation->GetOffset());
11164 }
11165 
11166 static void InitStatInfo(VmaStatInfo& outInfo)
11167 {
11168  memset(&outInfo, 0, sizeof(outInfo));
11169  outInfo.allocationSizeMin = UINT64_MAX;
11170  outInfo.unusedRangeSizeMin = UINT64_MAX;
11171 }
11172 
11173 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11174 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11175 {
11176  inoutInfo.blockCount += srcInfo.blockCount;
11177  inoutInfo.allocationCount += srcInfo.allocationCount;
11178  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11179  inoutInfo.usedBytes += srcInfo.usedBytes;
11180  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11181  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11182  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11183  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11184  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11185 }
11186 
11187 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11188 {
11189  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11190  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11191  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11192  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11193 }
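/*
Illustrative sketch (not part of the library source): the averages computed above are
filled in when a client gathers global statistics:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("allocations: %u, used: %llu B, avg allocation: %llu B\n",
        stats.total.allocationCount,
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.allocationSizeAvg);
*/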
11194 
11195 VmaPool_T::VmaPool_T(
11196  VmaAllocator hAllocator,
11197  const VmaPoolCreateInfo& createInfo,
11198  VkDeviceSize preferredBlockSize) :
11199  m_BlockVector(
11200  hAllocator,
11201  createInfo.memoryTypeIndex,
11202  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11203  createInfo.minBlockCount,
11204  createInfo.maxBlockCount,
11205  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11206  createInfo.frameInUseCount,
11207  true, // isCustomPool
11208  createInfo.blockSize != 0, // explicitBlockSize
11209  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11210  m_Id(0)
11211 {
11212 }
11213 
11214 VmaPool_T::~VmaPool_T()
11215 {
11216 }
11217 
11218 #if VMA_STATS_STRING_ENABLED
11219 
11220 #endif // #if VMA_STATS_STRING_ENABLED
11221 
11222 VmaBlockVector::VmaBlockVector(
11223  VmaAllocator hAllocator,
11224  uint32_t memoryTypeIndex,
11225  VkDeviceSize preferredBlockSize,
11226  size_t minBlockCount,
11227  size_t maxBlockCount,
11228  VkDeviceSize bufferImageGranularity,
11229  uint32_t frameInUseCount,
11230  bool isCustomPool,
11231  bool explicitBlockSize,
11232  uint32_t algorithm) :
11233  m_hAllocator(hAllocator),
11234  m_MemoryTypeIndex(memoryTypeIndex),
11235  m_PreferredBlockSize(preferredBlockSize),
11236  m_MinBlockCount(minBlockCount),
11237  m_MaxBlockCount(maxBlockCount),
11238  m_BufferImageGranularity(bufferImageGranularity),
11239  m_FrameInUseCount(frameInUseCount),
11240  m_IsCustomPool(isCustomPool),
11241  m_ExplicitBlockSize(explicitBlockSize),
11242  m_Algorithm(algorithm),
11243  m_HasEmptyBlock(false),
11244  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11245  m_NextBlockId(0)
11246 {
11247 }
11248 
11249 VmaBlockVector::~VmaBlockVector()
11250 {
11251  for(size_t i = m_Blocks.size(); i--; )
11252  {
11253  m_Blocks[i]->Destroy(m_hAllocator);
11254  vma_delete(m_hAllocator, m_Blocks[i]);
11255  }
11256 }
11257 
11258 VkResult VmaBlockVector::CreateMinBlocks()
11259 {
11260  for(size_t i = 0; i < m_MinBlockCount; ++i)
11261  {
11262  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11263  if(res != VK_SUCCESS)
11264  {
11265  return res;
11266  }
11267  }
11268  return VK_SUCCESS;
11269 }
11270 
11271 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11272 {
11273  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11274 
11275  const size_t blockCount = m_Blocks.size();
11276 
11277  pStats->size = 0;
11278  pStats->unusedSize = 0;
11279  pStats->allocationCount = 0;
11280  pStats->unusedRangeCount = 0;
11281  pStats->unusedRangeSizeMax = 0;
11282  pStats->blockCount = blockCount;
11283 
11284  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11285  {
11286  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11287  VMA_ASSERT(pBlock);
11288  VMA_HEAVY_ASSERT(pBlock->Validate());
11289  pBlock->m_pMetadata->AddPoolStats(*pStats);
11290  }
11291 }
11292 
11293 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11294 {
11295  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11296  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11297  (VMA_DEBUG_MARGIN > 0) &&
11298  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11299 }
11300 
11301 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11302 
11303 VkResult VmaBlockVector::Allocate(
11304  VmaPool hCurrentPool,
11305  uint32_t currentFrameIndex,
11306  VkDeviceSize size,
11307  VkDeviceSize alignment,
11308  const VmaAllocationCreateInfo& createInfo,
11309  VmaSuballocationType suballocType,
11310  size_t allocationCount,
11311  VmaAllocation* pAllocations)
11312 {
11313  size_t allocIndex;
11314  VkResult res = VK_SUCCESS;
11315 
11316  {
11317  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11318  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11319  {
11320  res = AllocatePage(
11321  hCurrentPool,
11322  currentFrameIndex,
11323  size,
11324  alignment,
11325  createInfo,
11326  suballocType,
11327  pAllocations + allocIndex);
11328  if(res != VK_SUCCESS)
11329  {
11330  break;
11331  }
11332  }
11333  }
11334 
11335  if(res != VK_SUCCESS)
11336  {
11337  // Free all already created allocations.
11338  while(allocIndex--)
11339  {
11340  Free(pAllocations[allocIndex]);
11341  }
11342  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11343  }
11344 
11345  return res;
11346 }
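/*
Illustrative sketch (not part of the library source): the multi-page path above, with
its all-or-nothing rollback, backs vmaAllocateMemoryPages(). The `memReq` values here
are assumptions for illustration only.

    VkMemoryRequirements memReq = {};
    memReq.size = 65536;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocs[8] = {};
    VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo,
        8, allocs, VMA_NULL);
    // On failure, pages already created have been freed again - see the cleanup above.
*/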
11347 
11348 VkResult VmaBlockVector::AllocatePage(
11349  VmaPool hCurrentPool,
11350  uint32_t currentFrameIndex,
11351  VkDeviceSize size,
11352  VkDeviceSize alignment,
11353  const VmaAllocationCreateInfo& createInfo,
11354  VmaSuballocationType suballocType,
11355  VmaAllocation* pAllocation)
11356 {
11357  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11358  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11359  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11360  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11361  const bool canCreateNewBlock =
11362  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11363  (m_Blocks.size() < m_MaxBlockCount);
11364  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11365 
11366  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11367  // Which in turn is available only when maxBlockCount = 1.
11368  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11369  {
11370  canMakeOtherLost = false;
11371  }
11372 
11373  // Upper address can only be used with linear allocator and within single memory block.
11374  if(isUpperAddress &&
11375  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11376  {
11377  return VK_ERROR_FEATURE_NOT_PRESENT;
11378  }
11379 
11380  // Validate strategy.
11381  switch(strategy)
11382  {
11383  case 0:
11384  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11385  break;
11386  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11387  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11388  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11389  break;
11390  default:
11391  return VK_ERROR_FEATURE_NOT_PRESENT;
11392  }
11393 
11394  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11395  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11396  {
11397  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11398  }
11399 
11400  /*
11401  Under certain conditions, this whole section can be skipped for optimization, so
11402  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11403  e.g. for custom pools with linear algorithm.
11404  */
11405  if(!canMakeOtherLost || canCreateNewBlock)
11406  {
11407  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11408  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11409  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11410 
11411  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11412  {
11413  // Use only last block.
11414  if(!m_Blocks.empty())
11415  {
11416  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11417  VMA_ASSERT(pCurrBlock);
11418  VkResult res = AllocateFromBlock(
11419  pCurrBlock,
11420  hCurrentPool,
11421  currentFrameIndex,
11422  size,
11423  alignment,
11424  allocFlagsCopy,
11425  createInfo.pUserData,
11426  suballocType,
11427  strategy,
11428  pAllocation);
11429  if(res == VK_SUCCESS)
11430  {
11431  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11432  return VK_SUCCESS;
11433  }
11434  }
11435  }
11436  else
11437  {
11438  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11439  {
11440  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11441  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11442  {
11443  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11444  VMA_ASSERT(pCurrBlock);
11445  VkResult res = AllocateFromBlock(
11446  pCurrBlock,
11447  hCurrentPool,
11448  currentFrameIndex,
11449  size,
11450  alignment,
11451  allocFlagsCopy,
11452  createInfo.pUserData,
11453  suballocType,
11454  strategy,
11455  pAllocation);
11456  if(res == VK_SUCCESS)
11457  {
11458  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11459  return VK_SUCCESS;
11460  }
11461  }
11462  }
11463  else // WORST_FIT, FIRST_FIT
11464  {
11465  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11466  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11467  {
11468  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11469  VMA_ASSERT(pCurrBlock);
11470  VkResult res = AllocateFromBlock(
11471  pCurrBlock,
11472  hCurrentPool,
11473  currentFrameIndex,
11474  size,
11475  alignment,
11476  allocFlagsCopy,
11477  createInfo.pUserData,
11478  suballocType,
11479  strategy,
11480  pAllocation);
11481  if(res == VK_SUCCESS)
11482  {
11483  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11484  return VK_SUCCESS;
11485  }
11486  }
11487  }
11488  }
11489 
11490  // 2. Try to create new block.
11491  if(canCreateNewBlock)
11492  {
11493  // Calculate optimal size for new block.
11494  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11495  uint32_t newBlockSizeShift = 0;
11496  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11497 
11498  if(!m_ExplicitBlockSize)
11499  {
11500  // Allocate 1/8, 1/4, 1/2 as first blocks.
11501  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11502  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11503  {
11504  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11505  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11506  {
11507  newBlockSize = smallerNewBlockSize;
11508  ++newBlockSizeShift;
11509  }
11510  else
11511  {
11512  break;
11513  }
11514  }
11515  }
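 /*
 Worked example of the sizing heuristic above: with m_PreferredBlockSize = 256 MiB,
 no existing blocks, and a 10 MiB request, the loop halves three times
 (256 -> 128 -> 64 -> 32, each half still >= 2 * 10 MiB), so the first block is
 allocated at 32 MiB (1/8 of preferred). Subsequent blocks grow back toward the
 preferred size as the largest existing block size increases.
 */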
11516 
11517  size_t newBlockIndex = 0;
11518  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11519  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11520  if(!m_ExplicitBlockSize)
11521  {
11522  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11523  {
11524  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11525  if(smallerNewBlockSize >= size)
11526  {
11527  newBlockSize = smallerNewBlockSize;
11528  ++newBlockSizeShift;
11529  res = CreateBlock(newBlockSize, &newBlockIndex);
11530  }
11531  else
11532  {
11533  break;
11534  }
11535  }
11536  }
11537 
11538  if(res == VK_SUCCESS)
11539  {
11540  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11541  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11542 
11543  res = AllocateFromBlock(
11544  pBlock,
11545  hCurrentPool,
11546  currentFrameIndex,
11547  size,
11548  alignment,
11549  allocFlagsCopy,
11550  createInfo.pUserData,
11551  suballocType,
11552  strategy,
11553  pAllocation);
11554  if(res == VK_SUCCESS)
11555  {
11556  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11557  return VK_SUCCESS;
11558  }
11559  else
11560  {
11561  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11563  }
11564  }
11565  }
11566  }
11567 
11568  // 3. Try to allocate from existing blocks, making other allocations lost if needed.
11569  if(canMakeOtherLost)
11570  {
11571  uint32_t tryIndex = 0;
11572  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11573  {
11574  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11575  VmaAllocationRequest bestRequest = {};
11576  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11577 
11578  // 1. Search existing allocations.
11579  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11580  {
11581  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11582  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11583  {
11584  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11585  VMA_ASSERT(pCurrBlock);
11586  VmaAllocationRequest currRequest = {};
11587  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11588  currentFrameIndex,
11589  m_FrameInUseCount,
11590  m_BufferImageGranularity,
11591  size,
11592  alignment,
11593  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11594  suballocType,
11595  canMakeOtherLost,
11596  strategy,
11597  &currRequest))
11598  {
11599  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11600  if(pBestRequestBlock == VMA_NULL ||
11601  currRequestCost < bestRequestCost)
11602  {
11603  pBestRequestBlock = pCurrBlock;
11604  bestRequest = currRequest;
11605  bestRequestCost = currRequestCost;
11606 
11607  if(bestRequestCost == 0)
11608  {
11609  break;
11610  }
11611  }
11612  }
11613  }
11614  }
11615  else // WORST_FIT, FIRST_FIT
11616  {
11617  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11618  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11619  {
11620  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11621  VMA_ASSERT(pCurrBlock);
11622  VmaAllocationRequest currRequest = {};
11623  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11624  currentFrameIndex,
11625  m_FrameInUseCount,
11626  m_BufferImageGranularity,
11627  size,
11628  alignment,
11629  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11630  suballocType,
11631  canMakeOtherLost,
11632  strategy,
11633  &currRequest))
11634  {
11635  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11636  if(pBestRequestBlock == VMA_NULL ||
11637  currRequestCost < bestRequestCost ||
11638  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11639  {
11640  pBestRequestBlock = pCurrBlock;
11641  bestRequest = currRequest;
11642  bestRequestCost = currRequestCost;
11643 
11644  if(bestRequestCost == 0 ||
11645  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11646  {
11647  break;
11648  }
11649  }
11650  }
11651  }
11652  }
11653 
11654  if(pBestRequestBlock != VMA_NULL)
11655  {
11656  if(mapped)
11657  {
11658  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11659  if(res != VK_SUCCESS)
11660  {
11661  return res;
11662  }
11663  }
11664 
11665  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11666  currentFrameIndex,
11667  m_FrameInUseCount,
11668  &bestRequest))
11669  {
11670  // We no longer have an empty block.
11671  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11672  {
11673  m_HasEmptyBlock = false;
11674  }
11675  // Allocate from this pBlock.
11676  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11677  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11678  (*pAllocation)->InitBlockAllocation(
11679  hCurrentPool,
11680  pBestRequestBlock,
11681  bestRequest.offset,
11682  alignment,
11683  size,
11684  suballocType,
11685  mapped,
11686  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11687  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11688  VMA_DEBUG_LOG(" Returned from existing block");
11689  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11690  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11691  {
11692  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11693  }
11694  if(IsCorruptionDetectionEnabled())
11695  {
11696  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11697  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11698  }
11699  return VK_SUCCESS;
11700  }
11701  // else: Some allocations must have been touched while we are here. Next try.
11702  }
11703  else
11704  {
11705  // Could not find place in any of the blocks - break outer loop.
11706  break;
11707  }
11708  }
11709  /* Maximum number of tries exceeded - a very unlikely event when many other
11710  threads are simultaneously touching allocations, making it impossible to make
11711  them lost at the same time as we try to allocate. */
11712  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11713  {
11714  return VK_ERROR_TOO_MANY_OBJECTS;
11715  }
11716  }
11717 
11718  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11719 }
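// Client code selects the strategy that drives the search above through
// VmaAllocationCreateInfo::flags. An illustrative sketch, not part of the
// library (allocator creation omitted; `allocator` is assumed valid):
/*
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // BEST_FIT favors the lowest CalcCost(); FIRST_FIT short-circuits the
    // loop above at the first block that can satisfy the request.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, VMA_NULL);
*/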
11720 
11721 void VmaBlockVector::Free(
11722  VmaAllocation hAllocation)
11723 {
11724  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11725 
11726  // Scope for lock.
11727  {
11728  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11729 
11730  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11731 
11732  if(IsCorruptionDetectionEnabled())
11733  {
11734  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11735  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11736  }
11737 
11738  if(hAllocation->IsPersistentMap())
11739  {
11740  pBlock->Unmap(m_hAllocator, 1);
11741  }
11742 
11743  pBlock->m_pMetadata->Free(hAllocation);
11744  VMA_HEAVY_ASSERT(pBlock->Validate());
11745 
11746  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11747 
11748  // pBlock became empty after this deallocation.
11749  if(pBlock->m_pMetadata->IsEmpty())
11750  {
11751  // We already have an empty block - we don't want two, so delete this one.
11752  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11753  {
11754  pBlockToDelete = pBlock;
11755  Remove(pBlock);
11756  }
11757  // We now have our first empty block.
11758  else
11759  {
11760  m_HasEmptyBlock = true;
11761  }
11762  }
11763  // pBlock didn't become empty, but we have another empty block - find and free that one.
11764  // (This is optional, heuristics.)
11765  else if(m_HasEmptyBlock)
11766  {
11767  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11768  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11769  {
11770  pBlockToDelete = pLastBlock;
11771  m_Blocks.pop_back();
11772  m_HasEmptyBlock = false;
11773  }
11774  }
11775 
11776  IncrementallySortBlocks();
11777  }
11778 
11779  // Destruction of a free block. Deferred until this point, outside of the mutex
11780  // lock, for performance reasons.
11781  if(pBlockToDelete != VMA_NULL)
11782  {
11783  VMA_DEBUG_LOG(" Deleted empty allocation");
11784  pBlockToDelete->Destroy(m_hAllocator);
11785  vma_delete(m_hAllocator, pBlockToDelete);
11786  }
11787 }
11788 
11789 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11790 {
11791  VkDeviceSize result = 0;
11792  for(size_t i = m_Blocks.size(); i--; )
11793  {
11794  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11795  if(result >= m_PreferredBlockSize)
11796  {
11797  break;
11798  }
11799  }
11800  return result;
11801 }
11802 
11803 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11804 {
11805  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11806  {
11807  if(m_Blocks[blockIndex] == pBlock)
11808  {
11809  VmaVectorRemove(m_Blocks, blockIndex);
11810  return;
11811  }
11812  }
11813  VMA_ASSERT(0);
11814 }
11815 
11816 void VmaBlockVector::IncrementallySortBlocks()
11817 {
11818  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11819  {
11820  // Bubble sort only until first swap.
11821  for(size_t i = 1; i < m_Blocks.size(); ++i)
11822  {
11823  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11824  {
11825  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11826  return;
11827  }
11828  }
11829  }
11830 }
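// The single-swap pass above keeps m_Blocks nearly sorted by free size at O(n)
// cost per call instead of a full sort. The same idea in isolation - an
// illustrative sketch, assuming <vector> and <utility>:
/*
    template<typename T, typename Less>
    void IncrementalSortStep(std::vector<T>& v, Less less)
    {
        for(size_t i = 1; i < v.size(); ++i)
        {
            if(less(v[i], v[i - 1]))
            {
                std::swap(v[i - 1], v[i]);
                return; // At most one swap per call; repeated calls converge to sorted order.
            }
        }
    }
*/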
11831 
11832 VkResult VmaBlockVector::AllocateFromBlock(
11833  VmaDeviceMemoryBlock* pBlock,
11834  VmaPool hCurrentPool,
11835  uint32_t currentFrameIndex,
11836  VkDeviceSize size,
11837  VkDeviceSize alignment,
11838  VmaAllocationCreateFlags allocFlags,
11839  void* pUserData,
11840  VmaSuballocationType suballocType,
11841  uint32_t strategy,
11842  VmaAllocation* pAllocation)
11843 {
11844  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11845  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11846  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11847  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11848 
11849  VmaAllocationRequest currRequest = {};
11850  if(pBlock->m_pMetadata->CreateAllocationRequest(
11851  currentFrameIndex,
11852  m_FrameInUseCount,
11853  m_BufferImageGranularity,
11854  size,
11855  alignment,
11856  isUpperAddress,
11857  suballocType,
11858  false, // canMakeOtherLost
11859  strategy,
11860  &currRequest))
11861  {
11862  // Allocate from pCurrBlock.
11863  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11864 
11865  if(mapped)
11866  {
11867  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11868  if(res != VK_SUCCESS)
11869  {
11870  return res;
11871  }
11872  }
11873 
11874  // We no longer have an empty Allocation.
11875  if(pBlock->m_pMetadata->IsEmpty())
11876  {
11877  m_HasEmptyBlock = false;
11878  }
11879 
11880  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11881  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11882  (*pAllocation)->InitBlockAllocation(
11883  hCurrentPool,
11884  pBlock,
11885  currRequest.offset,
11886  alignment,
11887  size,
11888  suballocType,
11889  mapped,
11890  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11891  VMA_HEAVY_ASSERT(pBlock->Validate());
11892  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11893  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11894  {
11895  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11896  }
11897  if(IsCorruptionDetectionEnabled())
11898  {
11899  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11900  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11901  }
11902  return VK_SUCCESS;
11903  }
11904  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11905 }
11906 
11907 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11908 {
11909  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11910  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11911  allocInfo.allocationSize = blockSize;
11912  VkDeviceMemory mem = VK_NULL_HANDLE;
11913  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11914  if(res < 0)
11915  {
11916  return res;
11917  }
11918 
11919  // New VkDeviceMemory successfully created.
11920 
11921  // Create new Allocation for it.
11922  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11923  pBlock->Init(
11924  m_hAllocator,
11925  m_MemoryTypeIndex,
11926  mem,
11927  allocInfo.allocationSize,
11928  m_NextBlockId++,
11929  m_Algorithm);
11930 
11931  m_Blocks.push_back(pBlock);
11932  if(pNewBlockIndex != VMA_NULL)
11933  {
11934  *pNewBlockIndex = m_Blocks.size() - 1;
11935  }
11936 
11937  return VK_SUCCESS;
11938 }
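// Each block created above is one raw Vulkan allocation. For reference, the
// equivalent direct call - an illustrative sketch, with `device` and
// `memoryTypeIndex` assumed to come from the surrounding application:
/*
    VkMemoryAllocateInfo rawAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    rawAllocInfo.allocationSize = 256ull * 1024 * 1024; // one large block
    rawAllocInfo.memoryTypeIndex = memoryTypeIndex;
    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkResult res = vkAllocateMemory(device, &rawAllocInfo, VMA_NULL, &memory);
    // Many small allocations are then sub-allocated from this single
    // VkDeviceMemory, keeping the total allocation count well under
    // VkPhysicalDeviceLimits::maxMemoryAllocationCount.
*/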
11939 
11940 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11941  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11942  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11943 {
11944  const size_t blockCount = m_Blocks.size();
11945  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11946 
11947  enum BLOCK_FLAG
11948  {
11949  BLOCK_FLAG_USED = 0x00000001,
11950  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11951  };
11952 
11953  struct BlockInfo
11954  {
11955  uint32_t flags;
11956  void* pMappedData;
11957  };
11958  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11959  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11960  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11961 
11962  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11963  const size_t moveCount = moves.size();
11964  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11965  {
11966  const VmaDefragmentationMove& move = moves[moveIndex];
11967  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11968  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11969  }
11970 
11971  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11972 
11973  // Go over all blocks. Get mapped pointer or map if necessary.
11974  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11975  {
11976  BlockInfo& currBlockInfo = blockInfo[blockIndex];
11977  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11978  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
11979  {
11980  currBlockInfo.pMappedData = pBlock->GetMappedData();
11981  // It is not originally mapped - map it.
11982  if(currBlockInfo.pMappedData == VMA_NULL)
11983  {
11984  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
11985  if(pDefragCtx->res == VK_SUCCESS)
11986  {
11987  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
11988  }
11989  }
11990  }
11991  }
11992 
11993  // Go over all moves. Do actual data transfer.
11994  if(pDefragCtx->res == VK_SUCCESS)
11995  {
11996  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11997  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
11998 
11999  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12000  {
12001  const VmaDefragmentationMove& move = moves[moveIndex];
12002 
12003  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12004  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12005 
12006  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12007 
12008  // Invalidate source.
12009  if(isNonCoherent)
12010  {
12011  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12012  memRange.memory = pSrcBlock->GetDeviceMemory();
12013  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12014  memRange.size = VMA_MIN(
12015  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12016  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12017  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12018  }
12019 
12020  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12021  memmove(
12022  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12023  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12024  static_cast<size_t>(move.size));
12025 
12026  if(IsCorruptionDetectionEnabled())
12027  {
12028  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12029  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12030  }
12031 
12032  // Flush destination.
12033  if(isNonCoherent)
12034  {
12035  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12036  memRange.memory = pDstBlock->GetDeviceMemory();
12037  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12038  memRange.size = VMA_MIN(
12039  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12040  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12041  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12042  }
12043  }
12044  }
12045 
12046  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12047  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
12048  for(size_t blockIndex = blockCount; blockIndex--; )
12049  {
12050  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12051  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12052  {
12053  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12054  pBlock->Unmap(m_hAllocator, 1);
12055  }
12056  }
12057 }
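// The flush/invalidate clamping used above, in isolation: ranges passed to
// vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges must be aligned to
// nonCoherentAtomSize and must not extend past the end of the block. An
// illustrative sketch, assuming atomSize is a power of two:
/*
    VkDeviceSize AlignedRange(VkDeviceSize offset, VkDeviceSize size,
        VkDeviceSize atomSize, VkDeviceSize blockSize, VkDeviceSize& outOffset)
    {
        outOffset = offset & ~(atomSize - 1);                                 // VmaAlignDown
        VkDeviceSize end = (offset + size + atomSize - 1) & ~(atomSize - 1);  // VmaAlignUp
        if(end > blockSize) end = blockSize;                                  // clamp to block
        return end - outOffset;                                               // range size
    }
*/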
12058 
12059 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12060  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12061  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12062  VkCommandBuffer commandBuffer)
12063 {
12064  const size_t blockCount = m_Blocks.size();
12065 
12066  pDefragCtx->blockContexts.resize(blockCount);
12067  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12068 
12069  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12070  const size_t moveCount = moves.size();
12071  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12072  {
12073  const VmaDefragmentationMove& move = moves[moveIndex];
12074  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12075  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12076  }
12077 
12078  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12079 
12080  // Go over all blocks. Create and bind buffer for whole block if necessary.
12081  {
12082  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12083  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12084  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12085 
12086  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12087  {
12088  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12089  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12090  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12091  {
12092  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12093  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12094  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12095  if(pDefragCtx->res == VK_SUCCESS)
12096  {
12097  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12098  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12099  }
12100  }
12101  }
12102  }
12103 
12104  // Go over all moves. Post data transfer commands to command buffer.
12105  if(pDefragCtx->res == VK_SUCCESS)
12106  {
12107  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12108  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12109 
12110  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12111  {
12112  const VmaDefragmentationMove& move = moves[moveIndex];
12113 
12114  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12115  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12116 
12117  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12118 
12119  VkBufferCopy region = {
12120  move.srcOffset,
12121  move.dstOffset,
12122  move.size };
12123  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12124  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12125  }
12126  }
12127 
12128  // Save buffers to defrag context for later destruction.
12129  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12130  {
12131  pDefragCtx->res = VK_NOT_READY;
12132  }
12133 }
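// From the application's side, the GPU path above needs a command buffer in
// the recording state; it must then be submitted and waited on before ending
// defragmentation. An illustrative sketch (`cmdBuf`, `allocations`, and
// `allocCount` are assumed from the surrounding application):
/*
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;  // no limit
    defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = cmdBuf; // begun with vkBeginCommandBuffer

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // vkEndCommandBuffer(cmdBuf), submit it, wait on a fence, then:
    vmaDefragmentationEnd(allocator, defragCtx);
*/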
12134 
12135 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12136 {
12137  m_HasEmptyBlock = false;
12138  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12139  {
12140  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12141  if(pBlock->m_pMetadata->IsEmpty())
12142  {
12143  if(m_Blocks.size() > m_MinBlockCount)
12144  {
12145  if(pDefragmentationStats != VMA_NULL)
12146  {
12147  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12148  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12149  }
12150 
12151  VmaVectorRemove(m_Blocks, blockIndex);
12152  pBlock->Destroy(m_hAllocator);
12153  vma_delete(m_hAllocator, pBlock);
12154  }
12155  else
12156  {
12157  m_HasEmptyBlock = true;
12158  }
12159  }
12160  }
12161 }
12162 
12163 #if VMA_STATS_STRING_ENABLED
12164 
12165 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12166 {
12167  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12168 
12169  json.BeginObject();
12170 
12171  if(m_IsCustomPool)
12172  {
12173  json.WriteString("MemoryTypeIndex");
12174  json.WriteNumber(m_MemoryTypeIndex);
12175 
12176  json.WriteString("BlockSize");
12177  json.WriteNumber(m_PreferredBlockSize);
12178 
12179  json.WriteString("BlockCount");
12180  json.BeginObject(true);
12181  if(m_MinBlockCount > 0)
12182  {
12183  json.WriteString("Min");
12184  json.WriteNumber((uint64_t)m_MinBlockCount);
12185  }
12186  if(m_MaxBlockCount < SIZE_MAX)
12187  {
12188  json.WriteString("Max");
12189  json.WriteNumber((uint64_t)m_MaxBlockCount);
12190  }
12191  json.WriteString("Cur");
12192  json.WriteNumber((uint64_t)m_Blocks.size());
12193  json.EndObject();
12194 
12195  if(m_FrameInUseCount > 0)
12196  {
12197  json.WriteString("FrameInUseCount");
12198  json.WriteNumber(m_FrameInUseCount);
12199  }
12200 
12201  if(m_Algorithm != 0)
12202  {
12203  json.WriteString("Algorithm");
12204  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12205  }
12206  }
12207  else
12208  {
12209  json.WriteString("PreferredBlockSize");
12210  json.WriteNumber(m_PreferredBlockSize);
12211  }
12212 
12213  json.WriteString("Blocks");
12214  json.BeginObject();
12215  for(size_t i = 0; i < m_Blocks.size(); ++i)
12216  {
12217  json.BeginString();
12218  json.ContinueString(m_Blocks[i]->GetId());
12219  json.EndString();
12220 
12221  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12222  }
12223  json.EndObject();
12224 
12225  json.EndObject();
12226 }
12227 
12228 #endif // #if VMA_STATS_STRING_ENABLED
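// The JSON produced by PrintDetailedMap() above reaches client code through
// the public statistics API - an illustrative sketch:
/*
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    // Write statsString to a file, e.g. for offline visualization...
    vmaFreeStatsString(allocator, statsString);
*/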
12229 
12230 void VmaBlockVector::Defragment(
12231  class VmaBlockVectorDefragmentationContext* pCtx,
12232  VmaDefragmentationStats* pStats,
12233  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12234  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12235  VkCommandBuffer commandBuffer)
12236 {
12237  pCtx->res = VK_SUCCESS;
12238 
12239  const VkMemoryPropertyFlags memPropFlags =
12240  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12241  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12242  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12243 
12244  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12245  isHostVisible;
12246  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12247  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
12248 
12249  // There are options to defragment this memory type.
12250  if(canDefragmentOnCpu || canDefragmentOnGpu)
12251  {
12252  bool defragmentOnGpu;
12253  // There is only one option to defragment this memory type.
12254  if(canDefragmentOnGpu != canDefragmentOnCpu)
12255  {
12256  defragmentOnGpu = canDefragmentOnGpu;
12257  }
12258  // Both options are available: Heuristics to choose the best one.
12259  else
12260  {
12261  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12262  m_hAllocator->IsIntegratedGpu();
12263  }
12264 
12265  bool overlappingMoveSupported = !defragmentOnGpu;
12266 
12267  if(m_hAllocator->m_UseMutex)
12268  {
12269  m_Mutex.LockWrite();
12270  pCtx->mutexLocked = true;
12271  }
12272 
12273  pCtx->Begin(overlappingMoveSupported);
12274 
12275  // Defragment.
12276 
12277  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12278  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12279  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12280  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12281  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12282 
12283  // Accumulate statistics.
12284  if(pStats != VMA_NULL)
12285  {
12286  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12287  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12288  pStats->bytesMoved += bytesMoved;
12289  pStats->allocationsMoved += allocationsMoved;
12290  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12291  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12292  if(defragmentOnGpu)
12293  {
12294  maxGpuBytesToMove -= bytesMoved;
12295  maxGpuAllocationsToMove -= allocationsMoved;
12296  }
12297  else
12298  {
12299  maxCpuBytesToMove -= bytesMoved;
12300  maxCpuAllocationsToMove -= allocationsMoved;
12301  }
12302  }
12303 
12304  if(pCtx->res >= VK_SUCCESS)
12305  {
12306  if(defragmentOnGpu)
12307  {
12308  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12309  }
12310  else
12311  {
12312  ApplyDefragmentationMovesCpu(pCtx, moves);
12313  }
12314  }
12315  }
12316 }
12317 
12318 void VmaBlockVector::DefragmentationEnd(
12319  class VmaBlockVectorDefragmentationContext* pCtx,
12320  VmaDefragmentationStats* pStats)
12321 {
12322  // Destroy buffers.
12323  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12324  {
12325  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12326  if(blockCtx.hBuffer)
12327  {
12328  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12329  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12330  }
12331  }
12332 
12333  if(pCtx->res >= VK_SUCCESS)
12334  {
12335  FreeEmptyBlocks(pStats);
12336  }
12337 
12338  if(pCtx->mutexLocked)
12339  {
12340  VMA_ASSERT(m_hAllocator->m_UseMutex);
12341  m_Mutex.UnlockWrite();
12342  }
12343 }
12344 
12345 size_t VmaBlockVector::CalcAllocationCount() const
12346 {
12347  size_t result = 0;
12348  for(size_t i = 0; i < m_Blocks.size(); ++i)
12349  {
12350  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12351  }
12352  return result;
12353 }
12354 
12355 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12356 {
12357  if(m_BufferImageGranularity == 1)
12358  {
12359  return false;
12360  }
12361  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12362  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12363  {
12364  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12365  VMA_ASSERT(m_Algorithm == 0);
12366  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12367  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12368  {
12369  return true;
12370  }
12371  }
12372  return false;
12373 }
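// m_BufferImageGranularity mirrors VkPhysicalDeviceLimits::bufferImageGranularity.
// How client code might inspect it - an illustrative sketch, with
// `physicalDevice` assumed:
/*
    VkPhysicalDeviceProperties props;
    vkGetPhysicalDeviceProperties(physicalDevice, &props);
    const VkDeviceSize granularity = props.limits.bufferImageGranularity;
    // granularity == 1 means linear and optimal resources may be placed on
    // adjacent pages freely, so the check above can return false immediately.
*/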
12374 
12375 void VmaBlockVector::MakePoolAllocationsLost(
12376  uint32_t currentFrameIndex,
12377  size_t* pLostAllocationCount)
12378 {
12379  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12380  size_t lostAllocationCount = 0;
12381  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12382  {
12383  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12384  VMA_ASSERT(pBlock);
12385  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12386  }
12387  if(pLostAllocationCount != VMA_NULL)
12388  {
12389  *pLostAllocationCount = lostAllocationCount;
12390  }
12391 }
12392 
12393 VkResult VmaBlockVector::CheckCorruption()
12394 {
12395  if(!IsCorruptionDetectionEnabled())
12396  {
12397  return VK_ERROR_FEATURE_NOT_PRESENT;
12398  }
12399 
12400  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12401  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12402  {
12403  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12404  VMA_ASSERT(pBlock);
12405  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12406  if(res != VK_SUCCESS)
12407  {
12408  return res;
12409  }
12410  }
12411  return VK_SUCCESS;
12412 }
12413 
12414 void VmaBlockVector::AddStats(VmaStats* pStats)
12415 {
12416  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12417  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12418 
12419  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12420 
12421  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12422  {
12423  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12424  VMA_ASSERT(pBlock);
12425  VMA_HEAVY_ASSERT(pBlock->Validate());
12426  VmaStatInfo allocationStatInfo;
12427  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12428  VmaAddStatInfo(pStats->total, allocationStatInfo);
12429  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12430  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12431  }
12432 }
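// These per-block statistics are aggregated into the structure filled by the
// public vmaCalculateStats() - an illustrative sketch, assuming <cstdio>:
/*
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("used: %llu B, unused: %llu B, allocations: %u\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes,
        stats.total.allocationCount);
*/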
12433 
12434 ////////////////////////////////////////////////////////////////////////////////
12435 // VmaDefragmentationAlgorithm_Generic members definition
12436 
12437 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12438  VmaAllocator hAllocator,
12439  VmaBlockVector* pBlockVector,
12440  uint32_t currentFrameIndex,
12441  bool overlappingMoveSupported) :
12442  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12443  m_AllAllocations(false),
12444  m_AllocationCount(0),
12445  m_BytesMoved(0),
12446  m_AllocationsMoved(0),
12447  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12448 {
12449  // Create block info for each block.
12450  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12451  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12452  {
12453  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12454  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12455  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12456  m_Blocks.push_back(pBlockInfo);
12457  }
12458 
12459  // Sort them by m_pBlock pointer value.
12460  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12461 }
12462 
12463 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12464 {
12465  for(size_t i = m_Blocks.size(); i--; )
12466  {
12467  vma_delete(m_hAllocator, m_Blocks[i]);
12468  }
12469 }
12470 
12471 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12472 {
12473  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
12474  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12475  {
12476  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12477  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12478  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12479  {
12480  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12481  (*it)->m_Allocations.push_back(allocInfo);
12482  }
12483  else
12484  {
12485  VMA_ASSERT(0);
12486  }
12487 
12488  ++m_AllocationCount;
12489  }
12490 }
12491 
12492 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12493  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12494  VkDeviceSize maxBytesToMove,
12495  uint32_t maxAllocationsToMove)
12496 {
12497  if(m_Blocks.empty())
12498  {
12499  return VK_SUCCESS;
12500  }
12501 
12502  // This is a choice based on research.
12503  // Option 1:
12504  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12505  // Option 2:
12506  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12507  // Option 3:
12508  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12509 
12510  size_t srcBlockMinIndex = 0;
12511  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12512  /*
12513  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12514  {
12515  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12516  if(blocksWithNonMovableCount > 0)
12517  {
12518  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12519  }
12520  }
12521  */
12522 
12523  size_t srcBlockIndex = m_Blocks.size() - 1;
12524  size_t srcAllocIndex = SIZE_MAX;
12525  for(;;)
12526  {
12527  // 1. Find next allocation to move.
12528  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12529  // 1.2. Then start from last to first m_Allocations.
12530  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12531  {
12532  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12533  {
12534  // Finished: no more allocations to process.
12535  if(srcBlockIndex == srcBlockMinIndex)
12536  {
12537  return VK_SUCCESS;
12538  }
12539  else
12540  {
12541  --srcBlockIndex;
12542  srcAllocIndex = SIZE_MAX;
12543  }
12544  }
12545  else
12546  {
12547  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12548  }
12549  }
12550 
12551  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12552  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12553 
12554  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12555  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12556  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12557  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12558 
12559  // 2. Try to find new place for this allocation in preceding or current block.
12560  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12561  {
12562  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12563  VmaAllocationRequest dstAllocRequest;
12564  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12565  m_CurrentFrameIndex,
12566  m_pBlockVector->GetFrameInUseCount(),
12567  m_pBlockVector->GetBufferImageGranularity(),
12568  size,
12569  alignment,
12570  false, // upperAddress
12571  suballocType,
12572  false, // canMakeOtherLost
12573  strategy,
12574  &dstAllocRequest) &&
12575  MoveMakesSense(
12576  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12577  {
12578  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12579 
12580  // Reached limit on number of allocations or bytes to move.
12581  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12582  (m_BytesMoved + size > maxBytesToMove))
12583  {
12584  return VK_SUCCESS;
12585  }
12586 
12587  VmaDefragmentationMove move;
12588  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12589  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12590  move.srcOffset = srcOffset;
12591  move.dstOffset = dstAllocRequest.offset;
12592  move.size = size;
12593  moves.push_back(move);
12594 
12595  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12596  dstAllocRequest,
12597  suballocType,
12598  size,
12599  false, // upperAddress
12600  allocInfo.m_hAllocation);
12601  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12602 
12603  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12604 
12605  if(allocInfo.m_pChanged != VMA_NULL)
12606  {
12607  *allocInfo.m_pChanged = VK_TRUE;
12608  }
12609 
12610  ++m_AllocationsMoved;
12611  m_BytesMoved += size;
12612 
12613  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12614 
12615  break;
12616  }
12617  }
12618 
12619  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12620 
12621  if(srcAllocIndex > 0)
12622  {
12623  --srcAllocIndex;
12624  }
12625  else
12626  {
12627  if(srcBlockIndex > 0)
12628  {
12629  --srcBlockIndex;
12630  srcAllocIndex = SIZE_MAX;
12631  }
12632  else
12633  {
12634  return VK_SUCCESS;
12635  }
12636  }
12637  }
12638 }
12639 
12640 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12641 {
12642  size_t result = 0;
12643  for(size_t i = 0; i < m_Blocks.size(); ++i)
12644  {
12645  if(m_Blocks[i]->m_HasNonMovableAllocations)
12646  {
12647  ++result;
12648  }
12649  }
12650  return result;
12651 }
12652 
12653 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12654  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12655  VkDeviceSize maxBytesToMove,
12656  uint32_t maxAllocationsToMove)
12657 {
12658  if(!m_AllAllocations && m_AllocationCount == 0)
12659  {
12660  return VK_SUCCESS;
12661  }
12662 
12663  const size_t blockCount = m_Blocks.size();
12664  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12665  {
12666  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12667 
12668  if(m_AllAllocations)
12669  {
12670  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12671  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12672  it != pMetadata->m_Suballocations.end();
12673  ++it)
12674  {
12675  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12676  {
12677  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12678  pBlockInfo->m_Allocations.push_back(allocInfo);
12679  }
12680  }
12681  }
12682 
12683  pBlockInfo->CalcHasNonMovableAllocations();
12684 
12685  // This is a choice based on research.
12686  // Option 1:
12687  pBlockInfo->SortAllocationsByOffsetDescending();
12688  // Option 2:
12689  //pBlockInfo->SortAllocationsBySizeDescending();
12690  }
12691 
12692  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12693  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12694 
12695  // This is a choice based on research.
12696  const uint32_t roundCount = 2;
12697 
12698  // Execute defragmentation rounds (the main part).
12699  VkResult result = VK_SUCCESS;
12700  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12701  {
12702  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12703  }
12704 
12705  return result;
12706 }
12707 
12708 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12709  size_t dstBlockIndex, VkDeviceSize dstOffset,
12710  size_t srcBlockIndex, VkDeviceSize srcOffset)
12711 {
12712  if(dstBlockIndex < srcBlockIndex)
12713  {
12714  return true;
12715  }
12716  if(dstBlockIndex > srcBlockIndex)
12717  {
12718  return false;
12719  }
12720  if(dstOffset < srcOffset)
12721  {
12722  return true;
12723  }
12724  return false;
12725 }
12726 
12727 ////////////////////////////////////////////////////////////////////////////////
12728 // VmaDefragmentationAlgorithm_Fast
12729 
12730 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12731  VmaAllocator hAllocator,
12732  VmaBlockVector* pBlockVector,
12733  uint32_t currentFrameIndex,
12734  bool overlappingMoveSupported) :
12735  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12736  m_OverlappingMoveSupported(overlappingMoveSupported),
12737  m_AllocationCount(0),
12738  m_AllAllocations(false),
12739  m_BytesMoved(0),
12740  m_AllocationsMoved(0),
12741  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12742 {
12743  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12744 
12745 }
12746 
12747 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12748 {
12749 }
12750 
12751 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12752  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12753  VkDeviceSize maxBytesToMove,
12754  uint32_t maxAllocationsToMove)
12755 {
12756  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12757 
12758  const size_t blockCount = m_pBlockVector->GetBlockCount();
12759  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12760  {
12761  return VK_SUCCESS;
12762  }
12763 
12764  PreprocessMetadata();
12765 
12766  // Sort blocks in order from most "destination" to most "source".
12767 
12768  m_BlockInfos.resize(blockCount);
12769  for(size_t i = 0; i < blockCount; ++i)
12770  {
12771  m_BlockInfos[i].origBlockIndex = i;
12772  }
12773 
12774  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12775  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12776  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12777  });
12778 
12779  // THE MAIN ALGORITHM
12780 
12781  FreeSpaceDatabase freeSpaceDb;
12782 
12783  size_t dstBlockInfoIndex = 0;
12784  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12785  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12786  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12787  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12788  VkDeviceSize dstOffset = 0;
12789 
12790  bool end = false;
12791  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12792  {
12793  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12794  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12795  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12796  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12797  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12798  {
12799  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12800  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12801  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12802  if(m_AllocationsMoved == maxAllocationsToMove ||
12803  m_BytesMoved + srcAllocSize > maxBytesToMove)
12804  {
12805  end = true;
12806  break;
12807  }
12808  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12809 
12810  // Try to place it in one of free spaces from the database.
12811  size_t freeSpaceInfoIndex;
12812  VkDeviceSize dstAllocOffset;
12813  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12814  freeSpaceInfoIndex, dstAllocOffset))
12815  {
12816  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12817  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12818  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12819  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12820 
12821  // Same block
12822  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12823  {
12824  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12825 
12826  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12827 
12828  VmaSuballocation suballoc = *srcSuballocIt;
12829  suballoc.offset = dstAllocOffset;
12830  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12831  m_BytesMoved += srcAllocSize;
12832  ++m_AllocationsMoved;
12833 
12834  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12835  ++nextSuballocIt;
12836  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12837  srcSuballocIt = nextSuballocIt;
12838 
12839  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12840 
12841  VmaDefragmentationMove move = {
12842  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12843  srcAllocOffset, dstAllocOffset,
12844  srcAllocSize };
12845  moves.push_back(move);
12846  }
12847  // Different block
12848  else
12849  {
12850  // MOVE OPTION 2: Move the allocation to a different block.
12851 
12852  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12853 
12854  VmaSuballocation suballoc = *srcSuballocIt;
12855  suballoc.offset = dstAllocOffset;
12856  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12857  m_BytesMoved += srcAllocSize;
12858  ++m_AllocationsMoved;
12859 
12860  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12861  ++nextSuballocIt;
12862  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12863  srcSuballocIt = nextSuballocIt;
12864 
12865  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12866 
12867  VmaDefragmentationMove move = {
12868  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12869  srcAllocOffset, dstAllocOffset,
12870  srcAllocSize };
12871  moves.push_back(move);
12872  }
12873  }
12874  else
12875  {
12876  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12877 
12878  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
12879  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12880  dstAllocOffset + srcAllocSize > dstBlockSize)
12881  {
12882  // But before that, register remaining free space at the end of dst block.
12883  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12884 
12885  ++dstBlockInfoIndex;
12886  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12887  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12888  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12889  dstBlockSize = pDstMetadata->GetSize();
12890  dstOffset = 0;
12891  dstAllocOffset = 0;
12892  }
12893 
12894  // Same block
12895  if(dstBlockInfoIndex == srcBlockInfoIndex)
12896  {
12897  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12898 
12899  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12900 
12901  bool skipOver = overlap;
12902  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12903  {
12904  // If destination and source places overlap, skip the move if it would
12905  // shift the allocation by less than 1/64 of its size.
12906  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12907  }
12908 
12909  if(skipOver)
12910  {
12911  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12912 
12913  dstOffset = srcAllocOffset + srcAllocSize;
12914  ++srcSuballocIt;
12915  }
12916  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12917  else
12918  {
12919  srcSuballocIt->offset = dstAllocOffset;
12920  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12921  dstOffset = dstAllocOffset + srcAllocSize;
12922  m_BytesMoved += srcAllocSize;
12923  ++m_AllocationsMoved;
12924  ++srcSuballocIt;
12925  VmaDefragmentationMove move = {
12926  srcOrigBlockIndex, dstOrigBlockIndex,
12927  srcAllocOffset, dstAllocOffset,
12928  srcAllocSize };
12929  moves.push_back(move);
12930  }
12931  }
12932  // Different block
12933  else
12934  {
12935  // MOVE OPTION 2: Move the allocation to a different block.
12936 
12937  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12938  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12939 
12940  VmaSuballocation suballoc = *srcSuballocIt;
12941  suballoc.offset = dstAllocOffset;
12942  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12943  dstOffset = dstAllocOffset + srcAllocSize;
12944  m_BytesMoved += srcAllocSize;
12945  ++m_AllocationsMoved;
12946 
12947  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12948  ++nextSuballocIt;
12949  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12950  srcSuballocIt = nextSuballocIt;
12951 
12952  pDstMetadata->m_Suballocations.push_back(suballoc);
12953 
12954  VmaDefragmentationMove move = {
12955  srcOrigBlockIndex, dstOrigBlockIndex,
12956  srcAllocOffset, dstAllocOffset,
12957  srcAllocSize };
12958  moves.push_back(move);
12959  }
12960  }
12961  }
12962  }
12963 
12964  m_BlockInfos.clear();
12965 
12966  PostprocessMetadata();
12967 
12968  return VK_SUCCESS;
12969 }
12970 
12971 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12972 {
12973  const size_t blockCount = m_pBlockVector->GetBlockCount();
12974  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12975  {
12976  VmaBlockMetadata_Generic* const pMetadata =
12977  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12978  pMetadata->m_FreeCount = 0;
12979  pMetadata->m_SumFreeSize = pMetadata->GetSize();
12980  pMetadata->m_FreeSuballocationsBySize.clear();
12981  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12982  it != pMetadata->m_Suballocations.end(); )
12983  {
12984  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
12985  {
12986  VmaSuballocationList::iterator nextIt = it;
12987  ++nextIt;
12988  pMetadata->m_Suballocations.erase(it);
12989  it = nextIt;
12990  }
12991  else
12992  {
12993  ++it;
12994  }
12995  }
12996  }
12997 }
12998 
12999 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13000 {
13001  const size_t blockCount = m_pBlockVector->GetBlockCount();
13002  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13003  {
13004  VmaBlockMetadata_Generic* const pMetadata =
13005  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13006  const VkDeviceSize blockSize = pMetadata->GetSize();
13007 
13008  // No allocations in this block - entire area is free.
13009  if(pMetadata->m_Suballocations.empty())
13010  {
13011  pMetadata->m_FreeCount = 1;
13012  //pMetadata->m_SumFreeSize is already set to blockSize.
13013  VmaSuballocation suballoc = {
13014  0, // offset
13015  blockSize, // size
13016  VMA_NULL, // hAllocation
13017  VMA_SUBALLOCATION_TYPE_FREE };
13018  pMetadata->m_Suballocations.push_back(suballoc);
13019  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13020  }
13021  // There are some allocations in this block.
13022  else
13023  {
13024  VkDeviceSize offset = 0;
13025  VmaSuballocationList::iterator it;
13026  for(it = pMetadata->m_Suballocations.begin();
13027  it != pMetadata->m_Suballocations.end();
13028  ++it)
13029  {
13030  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13031  VMA_ASSERT(it->offset >= offset);
13032 
13033  // Need to insert preceding free space.
13034  if(it->offset > offset)
13035  {
13036  ++pMetadata->m_FreeCount;
13037  const VkDeviceSize freeSize = it->offset - offset;
13038  VmaSuballocation suballoc = {
13039  offset, // offset
13040  freeSize, // size
13041  VMA_NULL, // hAllocation
13042  VMA_SUBALLOCATION_TYPE_FREE };
13043  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13044  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13045  {
13046  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13047  }
13048  }
13049 
13050  pMetadata->m_SumFreeSize -= it->size;
13051  offset = it->offset + it->size;
13052  }
13053 
13054  // Need to insert trailing free space.
13055  if(offset < blockSize)
13056  {
13057  ++pMetadata->m_FreeCount;
13058  const VkDeviceSize freeSize = blockSize - offset;
13059  VmaSuballocation suballoc = {
13060  offset, // offset
13061  freeSize, // size
13062  VMA_NULL, // hAllocation
13063  VMA_SUBALLOCATION_TYPE_FREE };
13064  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13065  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13066  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13067  {
13068  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13069  }
13070  }
13071 
13072  VMA_SORT(
13073  pMetadata->m_FreeSuballocationsBySize.begin(),
13074  pMetadata->m_FreeSuballocationsBySize.end(),
13075  VmaSuballocationItemSizeLess());
13076  }
13077 
13078  VMA_HEAVY_ASSERT(pMetadata->Validate());
13079  }
13080 }
13081 
13082 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13083 {
13084  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13085  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13086  // Advance to the first suballocation whose offset is >= suballoc.offset;
13087  // the combined loop condition ensures termination at that point.
13088  while(it != pMetadata->m_Suballocations.end() &&
13089  it->offset < suballoc.offset)
13090  {
13091  ++it;
13092  }
13093  pMetadata->m_Suballocations.insert(it, suballoc);
13094 }
13095 
13096 ////////////////////////////////////////////////////////////////////////////////
13097 // VmaBlockVectorDefragmentationContext
13098 
13099 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13100  VmaAllocator hAllocator,
13101  VmaPool hCustomPool,
13102  VmaBlockVector* pBlockVector,
13103  uint32_t currFrameIndex,
13104  uint32_t algorithmFlags) :
13105  res(VK_SUCCESS),
13106  mutexLocked(false),
13107  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13108  m_hAllocator(hAllocator),
13109  m_hCustomPool(hCustomPool),
13110  m_pBlockVector(pBlockVector),
13111  m_CurrFrameIndex(currFrameIndex),
13112  m_AlgorithmFlags(algorithmFlags),
13113  m_pAlgorithm(VMA_NULL),
13114  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13115  m_AllAllocations(false)
13116 {
13117 }
13118 
13119 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13120 {
13121  vma_delete(m_hAllocator, m_pAlgorithm);
13122 }
13123 
13124 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13125 {
13126  AllocInfo info = { hAlloc, pChanged };
13127  m_Allocations.push_back(info);
13128 }
13129 
13130 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13131 {
13132  const bool allAllocations = m_AllAllocations ||
13133  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13134 
13135  /********************************
13136  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13137  ********************************/
13138 
13139  /*
13140  Fast algorithm is supported only when certain criteria are met:
13141  - VMA_DEBUG_MARGIN is 0.
13142  - All allocations in this block vector are moveable.
13143  - There is no possibility of image/buffer granularity conflict.
13144  */
13145  if(VMA_DEBUG_MARGIN == 0 &&
13146  allAllocations &&
13147  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13148  {
13149  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13150  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13151  }
13152  else
13153  {
13154  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13155  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13156  }
13157 
13158  if(allAllocations)
13159  {
13160  m_pAlgorithm->AddAll();
13161  }
13162  else
13163  {
13164  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13165  {
13166  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13167  }
13168  }
13169 }
13170 
13171 ////////////////////////////////////////////////////////////////////////////////
13172 // VmaDefragmentationContext
13173 
13174 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13175  VmaAllocator hAllocator,
13176  uint32_t currFrameIndex,
13177  uint32_t flags,
13178  VmaDefragmentationStats* pStats) :
13179  m_hAllocator(hAllocator),
13180  m_CurrFrameIndex(currFrameIndex),
13181  m_Flags(flags),
13182  m_pStats(pStats),
13183  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13184 {
13185  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13186 }
13187 
13188 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13189 {
13190  for(size_t i = m_CustomPoolContexts.size(); i--; )
13191  {
13192  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13193  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13194  vma_delete(m_hAllocator, pBlockVectorCtx);
13195  }
13196  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13197  {
13198  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13199  if(pBlockVectorCtx)
13200  {
13201  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13202  vma_delete(m_hAllocator, pBlockVectorCtx);
13203  }
13204  }
13205 }
13206 
13207 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13208 {
13209  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13210  {
13211  VmaPool pool = pPools[poolIndex];
13212  VMA_ASSERT(pool);
13213  // Pools with algorithm other than default are not defragmented.
13214  if(pool->m_BlockVector.GetAlgorithm() == 0)
13215  {
13216  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13217 
13218  for(size_t i = m_CustomPoolContexts.size(); i--; )
13219  {
13220  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13221  {
13222  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13223  break;
13224  }
13225  }
13226 
13227  if(!pBlockVectorDefragCtx)
13228  {
13229  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13230  m_hAllocator,
13231  pool,
13232  &pool->m_BlockVector,
13233  m_CurrFrameIndex,
13234  m_Flags);
13235  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13236  }
13237 
13238  pBlockVectorDefragCtx->AddAll();
13239  }
13240  }
13241 }
13242 
13243 void VmaDefragmentationContext_T::AddAllocations(
13244  uint32_t allocationCount,
13245  VmaAllocation* pAllocations,
13246  VkBool32* pAllocationsChanged)
13247 {
13248  // Dispatch pAllocations among per-block-vector defragmentation contexts. Create them when necessary.
13249  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13250  {
13251  const VmaAllocation hAlloc = pAllocations[allocIndex];
13252  VMA_ASSERT(hAlloc);
13253  // DedicatedAlloc cannot be defragmented.
13254  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13255  // Lost allocation cannot be defragmented.
13256  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13257  {
13258  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13259 
13260  const VmaPool hAllocPool = hAlloc->GetPool();
13261  // This allocation belongs to a custom pool.
13262  if(hAllocPool != VK_NULL_HANDLE)
13263  {
13264  // Pools with algorithm other than default are not defragmented.
13265  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13266  {
13267  for(size_t i = m_CustomPoolContexts.size(); i--; )
13268  {
13269  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13270  {
13271  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13272  break;
13273  }
13274  }
13275  if(!pBlockVectorDefragCtx)
13276  {
13277  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13278  m_hAllocator,
13279  hAllocPool,
13280  &hAllocPool->m_BlockVector,
13281  m_CurrFrameIndex,
13282  m_Flags);
13283  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13284  }
13285  }
13286  }
13287  // This allocation belongs to the default pool.
13288  else
13289  {
13290  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13291  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13292  if(!pBlockVectorDefragCtx)
13293  {
13294  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13295  m_hAllocator,
13296  VMA_NULL, // hCustomPool
13297  m_hAllocator->m_pBlockVectors[memTypeIndex],
13298  m_CurrFrameIndex,
13299  m_Flags);
13300  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13301  }
13302  }
13303 
13304  if(pBlockVectorDefragCtx)
13305  {
13306  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13307  &pAllocationsChanged[allocIndex] : VMA_NULL;
13308  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13309  }
13310  }
13311  }
13312 }
13313 
13314 VkResult VmaDefragmentationContext_T::Defragment(
13315  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13316  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13317  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13318 {
13319  if(pStats)
13320  {
13321  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13322  }
13323 
13324  if(commandBuffer == VK_NULL_HANDLE)
13325  {
13326  maxGpuBytesToMove = 0;
13327  maxGpuAllocationsToMove = 0;
13328  }
13329 
13330  VkResult res = VK_SUCCESS;
13331 
13332  // Process default pools.
13333  for(uint32_t memTypeIndex = 0;
13334  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13335  ++memTypeIndex)
13336  {
13337  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13338  if(pBlockVectorCtx)
13339  {
13340  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13341  pBlockVectorCtx->GetBlockVector()->Defragment(
13342  pBlockVectorCtx,
13343  pStats,
13344  maxCpuBytesToMove, maxCpuAllocationsToMove,
13345  maxGpuBytesToMove, maxGpuAllocationsToMove,
13346  commandBuffer);
13347  if(pBlockVectorCtx->res != VK_SUCCESS)
13348  {
13349  res = pBlockVectorCtx->res;
13350  }
13351  }
13352  }
13353 
13354  // Process custom pools.
13355  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13356  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13357  ++customCtxIndex)
13358  {
13359  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13360  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13361  pBlockVectorCtx->GetBlockVector()->Defragment(
13362  pBlockVectorCtx,
13363  pStats,
13364  maxCpuBytesToMove, maxCpuAllocationsToMove,
13365  maxGpuBytesToMove, maxGpuAllocationsToMove,
13366  commandBuffer);
13367  if(pBlockVectorCtx->res != VK_SUCCESS)
13368  {
13369  res = pBlockVectorCtx->res;
13370  }
13371  }
13372 
13373  return res;
13374 }
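// [Editor's note] A minimal usage sketch of the defragmentation flow implemented
// above, restricted to CPU-side moves (allocator, allocations and allocCount are
// assumed to exist in the calling code):
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocations;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//     // commandBuffer stays VK_NULL_HANDLE, so Defragment() zeroes the GPU limits.
//
//     VmaDefragmentationContext defragCtx;
//     VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     if(res >= VK_SUCCESS)
//     {
//         vmaDefragmentationEnd(allocator, defragCtx);
//     }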
13375 
13376 ////////////////////////////////////////////////////////////////////////////////
13377 // VmaRecorder
13378 
13379 #if VMA_RECORDING_ENABLED
13380 
13381 VmaRecorder::VmaRecorder() :
13382  m_UseMutex(true),
13383  m_Flags(0),
13384  m_File(VMA_NULL),
13385  m_Freq(INT64_MAX),
13386  m_StartCounter(INT64_MAX)
13387 {
13388 }
13389 
13390 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13391 {
13392  m_UseMutex = useMutex;
13393  m_Flags = settings.flags;
13394 
13395  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13396  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13397 
13398  // Open file for writing.
13399  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13400  if(err != 0)
13401  {
13402  return VK_ERROR_INITIALIZATION_FAILED;
13403  }
13404 
13405  // Write header.
13406  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13407  fprintf(m_File, "%s\n", "1,5");
13408 
13409  return VK_SUCCESS;
13410 }
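// [Editor's note] Each Record* method below appends one CSV line of the form
// threadId,time,frameIndex,functionName,args... to the file opened above; for
// example, a hypothetical line could read "5204,0.115,0,vmaCreateAllocator".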
13411 
13412 VmaRecorder::~VmaRecorder()
13413 {
13414  if(m_File != VMA_NULL)
13415  {
13416  fclose(m_File);
13417  }
13418 }
13419 
13420 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13421 {
13422  CallParams callParams;
13423  GetBasicParams(callParams);
13424 
13425  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13426  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13427  Flush();
13428 }
13429 
13430 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13431 {
13432  CallParams callParams;
13433  GetBasicParams(callParams);
13434 
13435  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13436  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13437  Flush();
13438 }
13439 
13440 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13441 {
13442  CallParams callParams;
13443  GetBasicParams(callParams);
13444 
13445  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13446  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13447  createInfo.memoryTypeIndex,
13448  createInfo.flags,
13449  createInfo.blockSize,
13450  (uint64_t)createInfo.minBlockCount,
13451  (uint64_t)createInfo.maxBlockCount,
13452  createInfo.frameInUseCount,
13453  pool);
13454  Flush();
13455 }
13456 
13457 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13458 {
13459  CallParams callParams;
13460  GetBasicParams(callParams);
13461 
13462  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13463  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13464  pool);
13465  Flush();
13466 }
13467 
13468 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13469  const VkMemoryRequirements& vkMemReq,
13470  const VmaAllocationCreateInfo& createInfo,
13471  VmaAllocation allocation)
13472 {
13473  CallParams callParams;
13474  GetBasicParams(callParams);
13475 
13476  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13477  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13478  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13479  vkMemReq.size,
13480  vkMemReq.alignment,
13481  vkMemReq.memoryTypeBits,
13482  createInfo.flags,
13483  createInfo.usage,
13484  createInfo.requiredFlags,
13485  createInfo.preferredFlags,
13486  createInfo.memoryTypeBits,
13487  createInfo.pool,
13488  allocation,
13489  userDataStr.GetString());
13490  Flush();
13491 }
13492 
13493 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13494  const VkMemoryRequirements& vkMemReq,
13495  const VmaAllocationCreateInfo& createInfo,
13496  uint64_t allocationCount,
13497  const VmaAllocation* pAllocations)
13498 {
13499  CallParams callParams;
13500  GetBasicParams(callParams);
13501 
13502  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13503  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13504  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13505  vkMemReq.size,
13506  vkMemReq.alignment,
13507  vkMemReq.memoryTypeBits,
13508  createInfo.flags,
13509  createInfo.usage,
13510  createInfo.requiredFlags,
13511  createInfo.preferredFlags,
13512  createInfo.memoryTypeBits,
13513  createInfo.pool);
13514  PrintPointerList(allocationCount, pAllocations);
13515  fprintf(m_File, ",%s\n", userDataStr.GetString());
13516  Flush();
13517 }
13518 
13519 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13520  const VkMemoryRequirements& vkMemReq,
13521  bool requiresDedicatedAllocation,
13522  bool prefersDedicatedAllocation,
13523  const VmaAllocationCreateInfo& createInfo,
13524  VmaAllocation allocation)
13525 {
13526  CallParams callParams;
13527  GetBasicParams(callParams);
13528 
13529  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13530  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13531  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13532  vkMemReq.size,
13533  vkMemReq.alignment,
13534  vkMemReq.memoryTypeBits,
13535  requiresDedicatedAllocation ? 1 : 0,
13536  prefersDedicatedAllocation ? 1 : 0,
13537  createInfo.flags,
13538  createInfo.usage,
13539  createInfo.requiredFlags,
13540  createInfo.preferredFlags,
13541  createInfo.memoryTypeBits,
13542  createInfo.pool,
13543  allocation,
13544  userDataStr.GetString());
13545  Flush();
13546 }
13547 
13548 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13549  const VkMemoryRequirements& vkMemReq,
13550  bool requiresDedicatedAllocation,
13551  bool prefersDedicatedAllocation,
13552  const VmaAllocationCreateInfo& createInfo,
13553  VmaAllocation allocation)
13554 {
13555  CallParams callParams;
13556  GetBasicParams(callParams);
13557 
13558  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13559  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13560  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13561  vkMemReq.size,
13562  vkMemReq.alignment,
13563  vkMemReq.memoryTypeBits,
13564  requiresDedicatedAllocation ? 1 : 0,
13565  prefersDedicatedAllocation ? 1 : 0,
13566  createInfo.flags,
13567  createInfo.usage,
13568  createInfo.requiredFlags,
13569  createInfo.preferredFlags,
13570  createInfo.memoryTypeBits,
13571  createInfo.pool,
13572  allocation,
13573  userDataStr.GetString());
13574  Flush();
13575 }
13576 
13577 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13578  VmaAllocation allocation)
13579 {
13580  CallParams callParams;
13581  GetBasicParams(callParams);
13582 
13583  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13584  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13585  allocation);
13586  Flush();
13587 }
13588 
13589 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13590  uint64_t allocationCount,
13591  const VmaAllocation* pAllocations)
13592 {
13593  CallParams callParams;
13594  GetBasicParams(callParams);
13595 
13596  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13597  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13598  PrintPointerList(allocationCount, pAllocations);
13599  fprintf(m_File, "\n");
13600  Flush();
13601 }
13602 
13603 void VmaRecorder::RecordResizeAllocation(
13604  uint32_t frameIndex,
13605  VmaAllocation allocation,
13606  VkDeviceSize newSize)
13607 {
13608  CallParams callParams;
13609  GetBasicParams(callParams);
13610 
13611  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13612  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13613  allocation, newSize);
13614  Flush();
13615 }
13616 
13617 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13618  VmaAllocation allocation,
13619  const void* pUserData)
13620 {
13621  CallParams callParams;
13622  GetBasicParams(callParams);
13623 
13624  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13625  UserDataString userDataStr(
13626  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13627  pUserData);
13628  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13629  allocation,
13630  userDataStr.GetString());
13631  Flush();
13632 }
13633 
13634 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13635  VmaAllocation allocation)
13636 {
13637  CallParams callParams;
13638  GetBasicParams(callParams);
13639 
13640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13641  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13642  allocation);
13643  Flush();
13644 }
13645 
13646 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13647  VmaAllocation allocation)
13648 {
13649  CallParams callParams;
13650  GetBasicParams(callParams);
13651 
13652  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13653  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13654  allocation);
13655  Flush();
13656 }
13657 
13658 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13659  VmaAllocation allocation)
13660 {
13661  CallParams callParams;
13662  GetBasicParams(callParams);
13663 
13664  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13665  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13666  allocation);
13667  Flush();
13668 }
13669 
13670 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13671  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13672 {
13673  CallParams callParams;
13674  GetBasicParams(callParams);
13675 
13676  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13677  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13678  allocation,
13679  offset,
13680  size);
13681  Flush();
13682 }
13683 
13684 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13685  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13686 {
13687  CallParams callParams;
13688  GetBasicParams(callParams);
13689 
13690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13691  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13692  allocation,
13693  offset,
13694  size);
13695  Flush();
13696 }
13697 
13698 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13699  const VkBufferCreateInfo& bufCreateInfo,
13700  const VmaAllocationCreateInfo& allocCreateInfo,
13701  VmaAllocation allocation)
13702 {
13703  CallParams callParams;
13704  GetBasicParams(callParams);
13705 
13706  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13707  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13708  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13709  bufCreateInfo.flags,
13710  bufCreateInfo.size,
13711  bufCreateInfo.usage,
13712  bufCreateInfo.sharingMode,
13713  allocCreateInfo.flags,
13714  allocCreateInfo.usage,
13715  allocCreateInfo.requiredFlags,
13716  allocCreateInfo.preferredFlags,
13717  allocCreateInfo.memoryTypeBits,
13718  allocCreateInfo.pool,
13719  allocation,
13720  userDataStr.GetString());
13721  Flush();
13722 }
13723 
13724 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13725  const VkImageCreateInfo& imageCreateInfo,
13726  const VmaAllocationCreateInfo& allocCreateInfo,
13727  VmaAllocation allocation)
13728 {
13729  CallParams callParams;
13730  GetBasicParams(callParams);
13731 
13732  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13733  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13734  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13735  imageCreateInfo.flags,
13736  imageCreateInfo.imageType,
13737  imageCreateInfo.format,
13738  imageCreateInfo.extent.width,
13739  imageCreateInfo.extent.height,
13740  imageCreateInfo.extent.depth,
13741  imageCreateInfo.mipLevels,
13742  imageCreateInfo.arrayLayers,
13743  imageCreateInfo.samples,
13744  imageCreateInfo.tiling,
13745  imageCreateInfo.usage,
13746  imageCreateInfo.sharingMode,
13747  imageCreateInfo.initialLayout,
13748  allocCreateInfo.flags,
13749  allocCreateInfo.usage,
13750  allocCreateInfo.requiredFlags,
13751  allocCreateInfo.preferredFlags,
13752  allocCreateInfo.memoryTypeBits,
13753  allocCreateInfo.pool,
13754  allocation,
13755  userDataStr.GetString());
13756  Flush();
13757 }
13758 
13759 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13760  VmaAllocation allocation)
13761 {
13762  CallParams callParams;
13763  GetBasicParams(callParams);
13764 
13765  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13766  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13767  allocation);
13768  Flush();
13769 }
13770 
13771 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13772  VmaAllocation allocation)
13773 {
13774  CallParams callParams;
13775  GetBasicParams(callParams);
13776 
13777  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13778  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13779  allocation);
13780  Flush();
13781 }
13782 
13783 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13784  VmaAllocation allocation)
13785 {
13786  CallParams callParams;
13787  GetBasicParams(callParams);
13788 
13789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13790  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13791  allocation);
13792  Flush();
13793 }
13794 
13795 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13796  VmaAllocation allocation)
13797 {
13798  CallParams callParams;
13799  GetBasicParams(callParams);
13800 
13801  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13802  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13803  allocation);
13804  Flush();
13805 }
13806 
13807 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13808  VmaPool pool)
13809 {
13810  CallParams callParams;
13811  GetBasicParams(callParams);
13812 
13813  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13814  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13815  pool);
13816  Flush();
13817 }
13818 
13819 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13820  const VmaDefragmentationInfo2& info,
13821  VmaDefragmentationContext ctx)
13822 {
13823  CallParams callParams;
13824  GetBasicParams(callParams);
13825 
13826  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13827  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13828  info.flags);
13829  PrintPointerList(info.allocationCount, info.pAllocations);
13830  fprintf(m_File, ",");
13831  PrintPointerList(info.poolCount, info.pPools);
13832  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13833  info.maxCpuBytesToMove,
13834  info.maxCpuAllocationsToMove,
13835  info.maxGpuBytesToMove,
13836  info.maxGpuAllocationsToMove,
13837  info.commandBuffer,
13838  ctx);
13839  Flush();
13840 }
13841 
13842 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13843  VmaDefragmentationContext ctx)
13844 {
13845  CallParams callParams;
13846  GetBasicParams(callParams);
13847 
13848  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13849  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13850  ctx);
13851  Flush();
13852 }
13853 
13854 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13855 {
13856  if(pUserData != VMA_NULL)
13857  {
13858  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13859  {
13860  m_Str = (const char*)pUserData;
13861  }
13862  else
13863  {
13864  sprintf_s(m_PtrStr, "%p", pUserData);
13865  m_Str = m_PtrStr;
13866  }
13867  }
13868  else
13869  {
13870  m_Str = "";
13871  }
13872 }
13873 
13874 void VmaRecorder::WriteConfiguration(
13875  const VkPhysicalDeviceProperties& devProps,
13876  const VkPhysicalDeviceMemoryProperties& memProps,
13877  bool dedicatedAllocationExtensionEnabled)
13878 {
13879  fprintf(m_File, "Config,Begin\n");
13880 
13881  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13882  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13883  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13884  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13885  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13886  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13887 
13888  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13889  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13890  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13891 
13892  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13893  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13894  {
13895  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13896  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13897  }
13898  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13899  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13900  {
13901  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13902  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13903  }
13904 
13905  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13906 
13907  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13908  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13909  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13910  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13911  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13912  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13913  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13914  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13915  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13916 
13917  fprintf(m_File, "Config,End\n");
13918 }
13919 
13920 void VmaRecorder::GetBasicParams(CallParams& outParams)
13921 {
13922  outParams.threadId = GetCurrentThreadId();
13923 
13924  LARGE_INTEGER counter;
13925  QueryPerformanceCounter(&counter);
13926  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13927 }
13928 
13929 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
13930 {
13931  if(count)
13932  {
13933  fprintf(m_File, "%p", pItems[0]);
13934  for(uint64_t i = 1; i < count; ++i)
13935  {
13936  fprintf(m_File, " %p", pItems[i]);
13937  }
13938  }
13939 }
13940 
13941 void VmaRecorder::Flush()
13942 {
13943  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13944  {
13945  fflush(m_File);
13946  }
13947 }
13948 
13949 #endif // #if VMA_RECORDING_ENABLED
13950 
13951 ////////////////////////////////////////////////////////////////////////////////
13952 // VmaAllocator_T
13953 
13954 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13955  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13956  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13957  m_hDevice(pCreateInfo->device),
13958  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13959  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13960  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13961  m_PreferredLargeHeapBlockSize(0),
13962  m_PhysicalDevice(pCreateInfo->physicalDevice),
13963  m_CurrentFrameIndex(0),
13964  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13965  m_NextPoolId(0)
13966 #if VMA_RECORDING_ENABLED
13967  ,m_pRecorder(VMA_NULL)
13968 #endif
13969 {
13970  if(VMA_DEBUG_DETECT_CORRUPTION)
13971  {
13972  // Needs to be multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13973  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13974  }
13975 
13976  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13977 
13978 #if !(VMA_DEDICATED_ALLOCATION)
13979  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
13980  {
13981  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
13982  }
13983 #endif
13984 
13985  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
13986  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
13987  memset(&m_MemProps, 0, sizeof(m_MemProps));
13988 
13989  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
13990  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
13991 
13992  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
13993  {
13994  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
13995  }
13996 
13997  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
13998  {
13999  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14000  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14001  }
14002 
14003  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14004 
14005  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14006  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14007 
14008  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14009  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14010  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14011  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14012 
14013  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14014  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14015 
14016  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14017  {
14018  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14019  {
14020  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14021  if(limit != VK_WHOLE_SIZE)
14022  {
14023  m_HeapSizeLimit[heapIndex] = limit;
14024  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14025  {
14026  m_MemProps.memoryHeaps[heapIndex].size = limit;
14027  }
14028  }
14029  }
14030  }
14031 
14032  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14033  {
14034  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14035 
14036  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14037  this,
14038  memTypeIndex,
14039  preferredBlockSize,
14040  0,
14041  SIZE_MAX,
14042  GetBufferImageGranularity(),
14043  pCreateInfo->frameInUseCount,
14044  false, // isCustomPool
14045  false, // explicitBlockSize
14046  false); // linearAlgorithm
14047  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14048  // because minBlockCount is 0.
14049  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14050 
14051  }
14052 }
14053 
14054 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14055 {
14056  VkResult res = VK_SUCCESS;
14057 
14058  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14059  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14060  {
14061 #if VMA_RECORDING_ENABLED
14062  m_pRecorder = vma_new(this, VmaRecorder)();
14063  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14064  if(res != VK_SUCCESS)
14065  {
14066  return res;
14067  }
14068  m_pRecorder->WriteConfiguration(
14069  m_PhysicalDeviceProperties,
14070  m_MemProps,
14071  m_UseKhrDedicatedAllocation);
14072  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14073 #else
14074  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14075  return VK_ERROR_FEATURE_NOT_PRESENT;
14076 #endif
14077  }
14078 
14079  return res;
14080 }
14081 
14082 VmaAllocator_T::~VmaAllocator_T()
14083 {
14084 #if VMA_RECORDING_ENABLED
14085  if(m_pRecorder != VMA_NULL)
14086  {
14087  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14088  vma_delete(this, m_pRecorder);
14089  }
14090 #endif
14091 
14092  VMA_ASSERT(m_Pools.empty());
14093 
14094  for(size_t i = GetMemoryTypeCount(); i--; )
14095  {
14096  vma_delete(this, m_pDedicatedAllocations[i]);
14097  vma_delete(this, m_pBlockVectors[i]);
14098  }
14099 }
14100 
14101 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14102 {
14103 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14104  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
14105  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
14106  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
14107  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
14108  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
14109  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
14110  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
14111  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
14112  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
14113  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
14114  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
14115  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
14116  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
14117  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
14118  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
14119  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
14120  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
14121 #if VMA_DEDICATED_ALLOCATION
14122  if(m_UseKhrDedicatedAllocation)
14123  {
14124  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14125  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14126  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14127  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14128  }
14129 #endif // #if VMA_DEDICATED_ALLOCATION
14130 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14131 
14132 #define VMA_COPY_IF_NOT_NULL(funcName) \
14133  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14134 
14135  if(pVulkanFunctions != VMA_NULL)
14136  {
14137  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14138  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14139  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14140  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14141  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14142  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14143  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14144  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14145  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14146  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14147  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14148  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14149  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14150  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14151  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14152  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14153  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14154 #if VMA_DEDICATED_ALLOCATION
14155  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14156  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14157 #endif
14158  }
14159 
14160 #undef VMA_COPY_IF_NOT_NULL
14161 
14162  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14163  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14164  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14165  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14166  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14167  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14168  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14169  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14170  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14171  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14172  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14173  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14174  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14175  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14176  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14177  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14178  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14179  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14180  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14181 #if VMA_DEDICATED_ALLOCATION
14182  if(m_UseKhrDedicatedAllocation)
14183  {
14184  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14185  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14186  }
14187 #endif
14188 }
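// [Editor's note] A minimal sketch of satisfying the asserts above when
// VMA_STATIC_VULKAN_FUNCTIONS is defined to 0 (the fetched* pointers and
// allocatorCreateInfo are hypothetical caller-side variables, filled e.g. via
// vkGetInstanceProcAddr / vkGetDeviceProcAddr):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkGetPhysicalDeviceProperties = fetchedGetPhysicalDeviceProperties;
//     vulkanFunctions.vkAllocateMemory = fetchedAllocateMemory;
//     // ... assign every remaining member the same way ...
//     allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;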
14189 
14190 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14191 {
14192  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14193  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14194  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14195  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14196 }
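// [Editor's note] Worked example with the default macro values
// (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
// a 256 MiB heap counts as small, so its blocks are 256 MiB / 8 = 32 MiB;
// an 8 GiB heap counts as large, so its blocks are the preferred 256 MiB.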
14197 
14198 VkResult VmaAllocator_T::AllocateMemoryOfType(
14199  VkDeviceSize size,
14200  VkDeviceSize alignment,
14201  bool dedicatedAllocation,
14202  VkBuffer dedicatedBuffer,
14203  VkImage dedicatedImage,
14204  const VmaAllocationCreateInfo& createInfo,
14205  uint32_t memTypeIndex,
14206  VmaSuballocationType suballocType,
14207  size_t allocationCount,
14208  VmaAllocation* pAllocations)
14209 {
14210  VMA_ASSERT(pAllocations != VMA_NULL);
14211  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14212 
14213  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14214 
14215  // If memory type is not HOST_VISIBLE, disable MAPPED.
14216  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14217  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14218  {
14219  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14220  }
14221 
14222  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14223  VMA_ASSERT(blockVector);
14224 
14225  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14226  bool preferDedicatedMemory =
14227  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14228  dedicatedAllocation ||
14229  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14230  size > preferredBlockSize / 2;
14231 
14232  if(preferDedicatedMemory &&
14233  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14234  finalCreateInfo.pool == VK_NULL_HANDLE)
14235  {
14236  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14237  }
14238 
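// [Editor's note] Example of the heuristic above: with a 256 MiB preferred block
// size, any request larger than 128 MiB is redirected to a dedicated allocation
// (unless NEVER_ALLOCATE is set or the allocation targets a custom pool).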
14239  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14240  {
14241  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14242  {
14243  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14244  }
14245  else
14246  {
14247  return AllocateDedicatedMemory(
14248  size,
14249  suballocType,
14250  memTypeIndex,
14251  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14252  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14253  finalCreateInfo.pUserData,
14254  dedicatedBuffer,
14255  dedicatedImage,
14256  allocationCount,
14257  pAllocations);
14258  }
14259  }
14260  else
14261  {
14262  VkResult res = blockVector->Allocate(
14263  VK_NULL_HANDLE, // hCurrentPool
14264  m_CurrentFrameIndex.load(),
14265  size,
14266  alignment,
14267  finalCreateInfo,
14268  suballocType,
14269  allocationCount,
14270  pAllocations);
14271  if(res == VK_SUCCESS)
14272  {
14273  return res;
14274  }
14275 
14276  // Allocation from block vector failed. Try dedicated memory.
14277  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14278  {
14279  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14280  }
14281  else
14282  {
14283  res = AllocateDedicatedMemory(
14284  size,
14285  suballocType,
14286  memTypeIndex,
14287  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14288  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14289  finalCreateInfo.pUserData,
14290  dedicatedBuffer,
14291  dedicatedImage,
14292  allocationCount,
14293  pAllocations);
14294  if(res == VK_SUCCESS)
14295  {
14296  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14297  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14298  return VK_SUCCESS;
14299  }
14300  else
14301  {
14302  // Everything failed: Return error code.
14303  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14304  return res;
14305  }
14306  }
14307  }
14308 }
14309 
14310 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14311  VkDeviceSize size,
14312  VmaSuballocationType suballocType,
14313  uint32_t memTypeIndex,
14314  bool map,
14315  bool isUserDataString,
14316  void* pUserData,
14317  VkBuffer dedicatedBuffer,
14318  VkImage dedicatedImage,
14319  size_t allocationCount,
14320  VmaAllocation* pAllocations)
14321 {
14322  VMA_ASSERT(allocationCount > 0 && pAllocations);
14323 
14324  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14325  allocInfo.memoryTypeIndex = memTypeIndex;
14326  allocInfo.allocationSize = size;
14327 
14328 #if VMA_DEDICATED_ALLOCATION
14329  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14330  if(m_UseKhrDedicatedAllocation)
14331  {
14332  if(dedicatedBuffer != VK_NULL_HANDLE)
14333  {
14334  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14335  dedicatedAllocInfo.buffer = dedicatedBuffer;
14336  allocInfo.pNext = &dedicatedAllocInfo;
14337  }
14338  else if(dedicatedImage != VK_NULL_HANDLE)
14339  {
14340  dedicatedAllocInfo.image = dedicatedImage;
14341  allocInfo.pNext = &dedicatedAllocInfo;
14342  }
14343  }
14344 #endif // #if VMA_DEDICATED_ALLOCATION
14345 
14346  size_t allocIndex;
14347  VkResult res;
14348  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14349  {
14350  res = AllocateDedicatedMemoryPage(
14351  size,
14352  suballocType,
14353  memTypeIndex,
14354  allocInfo,
14355  map,
14356  isUserDataString,
14357  pUserData,
14358  pAllocations + allocIndex);
14359  if(res != VK_SUCCESS)
14360  {
14361  break;
14362  }
14363  }
14364 
14365  if(res == VK_SUCCESS)
14366  {
14367  // Register them in m_pDedicatedAllocations.
14368  {
14369  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14370  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14371  VMA_ASSERT(pDedicatedAllocations);
14372  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14373  {
14374  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14375  }
14376  }
14377 
14378  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14379  }
14380  else
14381  {
14382  // Free all already created allocations.
14383  while(allocIndex--)
14384  {
14385  VmaAllocation currAlloc = pAllocations[allocIndex];
14386  VkDeviceMemory hMemory = currAlloc->GetMemory();
14387 
14388  /*
14389  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14390  before vkFreeMemory.
14391 
14392  if(currAlloc->GetMappedData() != VMA_NULL)
14393  {
14394  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14395  }
14396  */
14397 
14398  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14399 
14400  currAlloc->SetUserData(this, VMA_NULL);
14401  vma_delete(this, currAlloc);
14402  }
14403 
14404  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14405  }
14406 
14407  return res;
14408 }
14409 
14410 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14411  VkDeviceSize size,
14412  VmaSuballocationType suballocType,
14413  uint32_t memTypeIndex,
14414  const VkMemoryAllocateInfo& allocInfo,
14415  bool map,
14416  bool isUserDataString,
14417  void* pUserData,
14418  VmaAllocation* pAllocation)
14419 {
14420  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14421  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14422  if(res < 0)
14423  {
14424  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14425  return res;
14426  }
14427 
14428  void* pMappedData = VMA_NULL;
14429  if(map)
14430  {
14431  res = (*m_VulkanFunctions.vkMapMemory)(
14432  m_hDevice,
14433  hMemory,
14434  0,
14435  VK_WHOLE_SIZE,
14436  0,
14437  &pMappedData);
14438  if(res < 0)
14439  {
14440  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14441  FreeVulkanMemory(memTypeIndex, size, hMemory);
14442  return res;
14443  }
14444  }
14445 
14446  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14447  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14448  (*pAllocation)->SetUserData(this, pUserData);
14449  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14450  {
14451  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14452  }
14453 
14454  return VK_SUCCESS;
14455 }
14456 
14457 void VmaAllocator_T::GetBufferMemoryRequirements(
14458  VkBuffer hBuffer,
14459  VkMemoryRequirements& memReq,
14460  bool& requiresDedicatedAllocation,
14461  bool& prefersDedicatedAllocation) const
14462 {
14463 #if VMA_DEDICATED_ALLOCATION
14464  if(m_UseKhrDedicatedAllocation)
14465  {
14466  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14467  memReqInfo.buffer = hBuffer;
14468 
14469  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14470 
14471  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14472  memReq2.pNext = &memDedicatedReq;
14473 
14474  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14475 
14476  memReq = memReq2.memoryRequirements;
14477  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14478  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14479  }
14480  else
14481 #endif // #if VMA_DEDICATED_ALLOCATION
14482  {
14483  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14484  requiresDedicatedAllocation = false;
14485  prefersDedicatedAllocation = false;
14486  }
14487 }
14488 
14489 void VmaAllocator_T::GetImageMemoryRequirements(
14490  VkImage hImage,
14491  VkMemoryRequirements& memReq,
14492  bool& requiresDedicatedAllocation,
14493  bool& prefersDedicatedAllocation) const
14494 {
14495 #if VMA_DEDICATED_ALLOCATION
14496  if(m_UseKhrDedicatedAllocation)
14497  {
14498  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14499  memReqInfo.image = hImage;
14500 
14501  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14502 
14503  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14504  memReq2.pNext = &memDedicatedReq;
14505 
14506  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14507 
14508  memReq = memReq2.memoryRequirements;
14509  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14510  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14511  }
14512  else
14513 #endif // #if VMA_DEDICATED_ALLOCATION
14514  {
14515  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14516  requiresDedicatedAllocation = false;
14517  prefersDedicatedAllocation = false;
14518  }
14519 }
14520 
14521 VkResult VmaAllocator_T::AllocateMemory(
14522  const VkMemoryRequirements& vkMemReq,
14523  bool requiresDedicatedAllocation,
14524  bool prefersDedicatedAllocation,
14525  VkBuffer dedicatedBuffer,
14526  VkImage dedicatedImage,
14527  const VmaAllocationCreateInfo& createInfo,
14528  VmaSuballocationType suballocType,
14529  size_t allocationCount,
14530  VmaAllocation* pAllocations)
14531 {
14532  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14533 
14534  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14535 
14536  if(vkMemReq.size == 0)
14537  {
14538  return VK_ERROR_VALIDATION_FAILED_EXT;
14539  }
14540  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14541  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14542  {
14543  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14544  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14545  }
14546  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14547  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14548  {
14549  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14551  }
14552  if(requiresDedicatedAllocation)
14553  {
14554  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14555  {
14556  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14557  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14558  }
14559  if(createInfo.pool != VK_NULL_HANDLE)
14560  {
14561  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14562  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14563  }
14564  }
14565  if((createInfo.pool != VK_NULL_HANDLE) &&
14566  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14567  {
14568  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14569  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14570  }
14571 
14572  if(createInfo.pool != VK_NULL_HANDLE)
14573  {
14574  const VkDeviceSize alignmentForPool = VMA_MAX(
14575  vkMemReq.alignment,
14576  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14577  return createInfo.pool->m_BlockVector.Allocate(
14578  createInfo.pool,
14579  m_CurrentFrameIndex.load(),
14580  vkMemReq.size,
14581  alignmentForPool,
14582  createInfo,
14583  suballocType,
14584  allocationCount,
14585  pAllocations);
14586  }
14587  else
14588  {
14589  // Bit mask of Vulkan memory types acceptable for this allocation.
14590  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14591  uint32_t memTypeIndex = UINT32_MAX;
14592  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14593  if(res == VK_SUCCESS)
14594  {
14595  VkDeviceSize alignmentForMemType = VMA_MAX(
14596  vkMemReq.alignment,
14597  GetMemoryTypeMinAlignment(memTypeIndex));
14598 
14599  res = AllocateMemoryOfType(
14600  vkMemReq.size,
14601  alignmentForMemType,
14602  requiresDedicatedAllocation || prefersDedicatedAllocation,
14603  dedicatedBuffer,
14604  dedicatedImage,
14605  createInfo,
14606  memTypeIndex,
14607  suballocType,
14608  allocationCount,
14609  pAllocations);
14610  // Succeeded on first try.
14611  if(res == VK_SUCCESS)
14612  {
14613  return res;
14614  }
14615  // Allocation from this memory type failed. Try other compatible memory types.
14616  else
14617  {
14618  for(;;)
14619  {
14620  // Remove old memTypeIndex from list of possibilities.
14621  memoryTypeBits &= ~(1u << memTypeIndex);
14622  // Find alternative memTypeIndex.
14623  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14624  if(res == VK_SUCCESS)
14625  {
14626  alignmentForMemType = VMA_MAX(
14627  vkMemReq.alignment,
14628  GetMemoryTypeMinAlignment(memTypeIndex));
14629 
14630  res = AllocateMemoryOfType(
14631  vkMemReq.size,
14632  alignmentForMemType,
14633  requiresDedicatedAllocation || prefersDedicatedAllocation,
14634  dedicatedBuffer,
14635  dedicatedImage,
14636  createInfo,
14637  memTypeIndex,
14638  suballocType,
14639  allocationCount,
14640  pAllocations);
14641  // Allocation from this alternative memory type succeeded.
14642  if(res == VK_SUCCESS)
14643  {
14644  return res;
14645  }
14646  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14647  }
14648  // No other matching memory type index could be found.
14649  else
14650  {
14651  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14652  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14653  }
14654  }
14655  }
14656  }
14657  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14658  else
14659  return res;
14660  }
14661 }
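// [Editor's note] Example of the fallback loop above: if vkMemReq.memoryTypeBits
// is 0b0111 and allocation from memTypeIndex 0 fails, the mask becomes
// 0b0111 & ~(1u << 0) == 0b0110 and vmaFindMemoryTypeIndex() is queried again,
// until some type succeeds or no compatible type remains.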
14662 
14663 void VmaAllocator_T::FreeMemory(
14664  size_t allocationCount,
14665  const VmaAllocation* pAllocations)
14666 {
14667  VMA_ASSERT(pAllocations);
14668 
14669  for(size_t allocIndex = allocationCount; allocIndex--; )
14670  {
14671  VmaAllocation allocation = pAllocations[allocIndex];
14672 
14673  if(allocation != VK_NULL_HANDLE)
14674  {
14675  if(TouchAllocation(allocation))
14676  {
14677  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14678  {
14679  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14680  }
14681 
14682  switch(allocation->GetType())
14683  {
14684  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14685  {
14686  VmaBlockVector* pBlockVector = VMA_NULL;
14687  VmaPool hPool = allocation->GetPool();
14688  if(hPool != VK_NULL_HANDLE)
14689  {
14690  pBlockVector = &hPool->m_BlockVector;
14691  }
14692  else
14693  {
14694  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14695  pBlockVector = m_pBlockVectors[memTypeIndex];
14696  }
14697  pBlockVector->Free(allocation);
14698  }
14699  break;
14700  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14701  FreeDedicatedMemory(allocation);
14702  break;
14703  default:
14704  VMA_ASSERT(0);
14705  }
14706  }
14707 
14708  allocation->SetUserData(this, VMA_NULL);
14709  vma_delete(this, allocation);
14710  }
14711  }
14712 }
14713 
14714 VkResult VmaAllocator_T::ResizeAllocation(
14715  const VmaAllocation alloc,
14716  VkDeviceSize newSize)
14717 {
14718  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14719  {
14720  return VK_ERROR_VALIDATION_FAILED_EXT;
14721  }
14722  if(newSize == alloc->GetSize())
14723  {
14724  return VK_SUCCESS;
14725  }
14726 
14727  switch(alloc->GetType())
14728  {
14729  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14730  return VK_ERROR_FEATURE_NOT_PRESENT;
14731  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14732  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14733  {
14734  alloc->ChangeSize(newSize);
14735  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14736  return VK_SUCCESS;
14737  }
14738  else
14739  {
14740  return VK_ERROR_OUT_OF_POOL_MEMORY;
14741  }
14742  default:
14743  VMA_ASSERT(0);
14744  return VK_ERROR_VALIDATION_FAILED_EXT;
14745  }
14746 }
14747 
14748 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14749 {
14750  // Initialize.
14751  InitStatInfo(pStats->total);
14752  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14753  InitStatInfo(pStats->memoryType[i]);
14754  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14755  InitStatInfo(pStats->memoryHeap[i]);
14756 
14757  // Process default pools.
14758  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14759  {
14760  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14761  VMA_ASSERT(pBlockVector);
14762  pBlockVector->AddStats(pStats);
14763  }
14764 
14765  // Process custom pools.
14766  {
14767  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14768  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14769  {
14770  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14771  }
14772  }
14773 
14774  // Process dedicated allocations.
14775  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14776  {
14777  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14778  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14779  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14780  VMA_ASSERT(pDedicatedAllocVector);
14781  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14782  {
14783  VmaStatInfo allocationStatInfo;
14784  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14785  VmaAddStatInfo(pStats->total, allocationStatInfo);
14786  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14787  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14788  }
14789  }
14790 
14791  // Postprocess.
14792  VmaPostprocessCalcStatInfo(pStats->total);
14793  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14794  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14795  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14796  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14797 }
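// [Editor's note] A minimal usage sketch of the aggregation above (allocator is
// assumed to exist in the calling code):
//
//     VmaStats stats;
//     vmaCalculateStats(allocator, &stats);
//     // stats.total now combines default pools, custom pools and dedicated
//     // allocations; stats.memoryType[i] / stats.memoryHeap[i] are per-type/heap.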
14798 
14799 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // = 0x1002, PCI vendor ID of AMD.
14800 
14801 VkResult VmaAllocator_T::DefragmentationBegin(
14802  const VmaDefragmentationInfo2& info,
14803  VmaDefragmentationStats* pStats,
14804  VmaDefragmentationContext* pContext)
14805 {
14806  if(info.pAllocationsChanged != VMA_NULL)
14807  {
14808  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14809  }
14810 
14811  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14812  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14813 
14814  (*pContext)->AddPools(info.poolCount, info.pPools);
14815  (*pContext)->AddAllocations(
14816  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14817 
14818  VkResult res = (*pContext)->Defragment(
14819  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14820  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14821  info.commandBuffer, pStats);
14822 
14823  if(res != VK_NOT_READY)
14824  {
14825  vma_delete(this, *pContext);
14826  *pContext = VMA_NULL;
14827  }
14828 
14829  return res;
14830 }
14831 
14832 VkResult VmaAllocator_T::DefragmentationEnd(
14833  VmaDefragmentationContext context)
14834 {
14835  vma_delete(this, context);
14836  return VK_SUCCESS;
14837 }
14838 
14839 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14840 {
14841  if(hAllocation->CanBecomeLost())
14842  {
14843  /*
14844  Warning: This is a carefully designed algorithm.
14845  Do not modify unless you really know what you're doing :)
14846  */
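// [Editor's note] The loop below spins until the allocation's last-use frame
// index is either VMA_FRAME_INDEX_LOST or equal to the current frame; otherwise
// it bumps the index with a compare-exchange and retries, so concurrent callers
// converge without taking a lock.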
14847  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14848  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14849  for(;;)
14850  {
14851  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14852  {
14853  pAllocationInfo->memoryType = UINT32_MAX;
14854  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14855  pAllocationInfo->offset = 0;
14856  pAllocationInfo->size = hAllocation->GetSize();
14857  pAllocationInfo->pMappedData = VMA_NULL;
14858  pAllocationInfo->pUserData = hAllocation->GetUserData();
14859  return;
14860  }
14861  else if(localLastUseFrameIndex == localCurrFrameIndex)
14862  {
14863  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14864  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14865  pAllocationInfo->offset = hAllocation->GetOffset();
14866  pAllocationInfo->size = hAllocation->GetSize();
14867  pAllocationInfo->pMappedData = VMA_NULL;
14868  pAllocationInfo->pUserData = hAllocation->GetUserData();
14869  return;
14870  }
14871  else // Last use time earlier than current time.
14872  {
14873  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14874  {
14875  localLastUseFrameIndex = localCurrFrameIndex;
14876  }
14877  }
14878  }
14879  }
14880  else
14881  {
14882 #if VMA_STATS_STRING_ENABLED
14883  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14884  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14885  for(;;)
14886  {
14887  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14888  if(localLastUseFrameIndex == localCurrFrameIndex)
14889  {
14890  break;
14891  }
14892  else // Last use time earlier than current time.
14893  {
14894  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14895  {
14896  localLastUseFrameIndex = localCurrFrameIndex;
14897  }
14898  }
14899  }
14900 #endif
14901 
14902  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14903  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14904  pAllocationInfo->offset = hAllocation->GetOffset();
14905  pAllocationInfo->size = hAllocation->GetSize();
14906  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14907  pAllocationInfo->pUserData = hAllocation->GetUserData();
14908  }
14909 }
14910 
14911 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14912 {
14913  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14914  if(hAllocation->CanBecomeLost())
14915  {
14916  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14917  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14918  for(;;)
14919  {
14920  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14921  {
14922  return false;
14923  }
14924  else if(localLastUseFrameIndex == localCurrFrameIndex)
14925  {
14926  return true;
14927  }
14928  else // Last use time earlier than current time.
14929  {
14930  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14931  {
14932  localLastUseFrameIndex = localCurrFrameIndex;
14933  }
14934  }
14935  }
14936  }
14937  else
14938  {
14939 #if VMA_STATS_STRING_ENABLED
14940  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14941  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14942  for(;;)
14943  {
14944  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14945  if(localLastUseFrameIndex == localCurrFrameIndex)
14946  {
14947  break;
14948  }
14949  else // Last use time earlier than current time.
14950  {
14951  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14952  {
14953  localLastUseFrameIndex = localCurrFrameIndex;
14954  }
14955  }
14956  }
14957 #endif
14958 
14959  return true;
14960  }
14961 }
14962 
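/*
Editorial note (a sketch, not part of the original source): GetAllocationInfo()
and TouchAllocation() above both use the same lock-free pattern: advance an
atomic "last use" frame index to the current frame, retrying on contention.
A minimal standalone illustration of that pattern, with illustrative names:

    #include <atomic>
    #include <cstdint>

    // Returns false if the slot was already marked lost, true otherwise.
    static bool TouchFrameIndex(
        std::atomic<uint32_t>& lastUseFrame, uint32_t currFrame, uint32_t lostValue)
    {
        uint32_t observed = lastUseFrame.load();
        for(;;)
        {
            if(observed == lostValue)
                return false; // Another thread marked the allocation lost.
            if(observed == currFrame)
                return true; // Already up to date.
            // On failure, compare_exchange_weak reloads `observed`, so the
            // loop re-examines the latest value instead of spinning blindly.
            if(lastUseFrame.compare_exchange_weak(observed, currFrame))
                return true;
        }
    }
*/
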
14963 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14964 {
14965  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14966 
14967  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14968 
14969  if(newCreateInfo.maxBlockCount == 0)
14970  {
14971  newCreateInfo.maxBlockCount = SIZE_MAX;
14972  }
14973  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14974  {
14975  return VK_ERROR_INITIALIZATION_FAILED;
14976  }
14977 
14978  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14979 
14980  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14981 
14982  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14983  if(res != VK_SUCCESS)
14984  {
14985  vma_delete(this, *pPool);
14986  *pPool = VMA_NULL;
14987  return res;
14988  }
14989 
14990  // Add to m_Pools.
14991  {
14992  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14993  (*pPool)->SetId(m_NextPoolId++);
14994  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
14995  }
14996 
14997  return VK_SUCCESS;
14998 }
14999 
15000 void VmaAllocator_T::DestroyPool(VmaPool pool)
15001 {
15002  // Remove from m_Pools.
15003  {
15004  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15005  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15006  VMA_ASSERT(success && "Pool not found in Allocator.");
15007  }
15008 
15009  vma_delete(this, pool);
15010 }
15011 
15012 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15013 {
15014  pool->m_BlockVector.GetPoolStats(pPoolStats);
15015 }
15016 
15017 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15018 {
15019  m_CurrentFrameIndex.store(frameIndex);
15020 }
15021 
15022 void VmaAllocator_T::MakePoolAllocationsLost(
15023  VmaPool hPool,
15024  size_t* pLostAllocationCount)
15025 {
15026  hPool->m_BlockVector.MakePoolAllocationsLost(
15027  m_CurrentFrameIndex.load(),
15028  pLostAllocationCount);
15029 }
15030 
15031 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15032 {
15033  return hPool->m_BlockVector.CheckCorruption();
15034 }
15035 
15036 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15037 {
15038  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15039 
15040  // Process default pools.
15041  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15042  {
15043  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15044  {
15045  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15046  VMA_ASSERT(pBlockVector);
15047  VkResult localRes = pBlockVector->CheckCorruption();
15048  switch(localRes)
15049  {
15050  case VK_ERROR_FEATURE_NOT_PRESENT:
15051  break;
15052  case VK_SUCCESS:
15053  finalRes = VK_SUCCESS;
15054  break;
15055  default:
15056  return localRes;
15057  }
15058  }
15059  }
15060 
15061  // Process custom pools.
15062  {
15063  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15064  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15065  {
15066  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15067  {
15068  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15069  switch(localRes)
15070  {
15071  case VK_ERROR_FEATURE_NOT_PRESENT:
15072  break;
15073  case VK_SUCCESS:
15074  finalRes = VK_SUCCESS;
15075  break;
15076  default:
15077  return localRes;
15078  }
15079  }
15080  }
15081  }
15082 
15083  return finalRes;
15084 }
15085 
15086 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15087 {
15088  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15089  (*pAllocation)->InitLost();
15090 }
15091 
15092 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15093 {
15094  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15095 
15096  VkResult res;
15097  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15098  {
15099  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15100  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15101  {
15102  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15103  if(res == VK_SUCCESS)
15104  {
15105  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15106  }
15107  }
15108  else
15109  {
15110  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15111  }
15112  }
15113  else
15114  {
15115  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15116  }
15117 
15118  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15119  {
15120  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15121  }
15122 
15123  return res;
15124 }
15125 
15126 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15127 {
15128  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15129  {
15130  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15131  }
15132 
15133  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15134 
15135  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15136  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15137  {
15138  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15139  m_HeapSizeLimit[heapIndex] += size;
15140  }
15141 }
15142 
15143 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15144 {
15145  if(hAllocation->CanBecomeLost())
15146  {
15147  return VK_ERROR_MEMORY_MAP_FAILED;
15148  }
15149 
15150  switch(hAllocation->GetType())
15151  {
15152  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15153  {
15154  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15155  char *pBytes = VMA_NULL;
15156  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15157  if(res == VK_SUCCESS)
15158  {
15159  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15160  hAllocation->BlockAllocMap();
15161  }
15162  return res;
15163  }
15164  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15165  return hAllocation->DedicatedAllocMap(this, ppData);
15166  default:
15167  VMA_ASSERT(0);
15168  return VK_ERROR_MEMORY_MAP_FAILED;
15169  }
15170 }
15171 
15172 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15173 {
15174  switch(hAllocation->GetType())
15175  {
15176  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15177  {
15178  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15179  hAllocation->BlockAllocUnmap();
15180  pBlock->Unmap(this, 1);
15181  }
15182  break;
15183  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15184  hAllocation->DedicatedAllocUnmap(this);
15185  break;
15186  default:
15187  VMA_ASSERT(0);
15188  }
15189 }
15190 
15191 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15192 {
15193  VkResult res = VK_SUCCESS;
15194  switch(hAllocation->GetType())
15195  {
15196  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15197  res = GetVulkanFunctions().vkBindBufferMemory(
15198  m_hDevice,
15199  hBuffer,
15200  hAllocation->GetMemory(),
15201  0); //memoryOffset
15202  break;
15203  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15204  {
15205  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15206  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15207  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15208  break;
15209  }
15210  default:
15211  VMA_ASSERT(0);
15212  }
15213  return res;
15214 }
15215 
15216 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15217 {
15218  VkResult res = VK_SUCCESS;
15219  switch(hAllocation->GetType())
15220  {
15221  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15222  res = GetVulkanFunctions().vkBindImageMemory(
15223  m_hDevice,
15224  hImage,
15225  hAllocation->GetMemory(),
15226  0); //memoryOffset
15227  break;
15228  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15229  {
15230  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15231  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15232  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15233  break;
15234  }
15235  default:
15236  VMA_ASSERT(0);
15237  }
15238  return res;
15239 }
15240 
15241 void VmaAllocator_T::FlushOrInvalidateAllocation(
15242  VmaAllocation hAllocation,
15243  VkDeviceSize offset, VkDeviceSize size,
15244  VMA_CACHE_OPERATION op)
15245 {
15246  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15247  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15248  {
15249  const VkDeviceSize allocationSize = hAllocation->GetSize();
15250  VMA_ASSERT(offset <= allocationSize);
15251 
15252  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15253 
15254  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15255  memRange.memory = hAllocation->GetMemory();
15256 
15257  switch(hAllocation->GetType())
15258  {
15259  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15260  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15261  if(size == VK_WHOLE_SIZE)
15262  {
15263  memRange.size = allocationSize - memRange.offset;
15264  }
15265  else
15266  {
15267  VMA_ASSERT(offset + size <= allocationSize);
15268  memRange.size = VMA_MIN(
15269  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15270  allocationSize - memRange.offset);
15271  }
15272  break;
15273 
15274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15275  {
15276  // 1. Still within this allocation.
15277  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15278  if(size == VK_WHOLE_SIZE)
15279  {
15280  size = allocationSize - offset;
15281  }
15282  else
15283  {
15284  VMA_ASSERT(offset + size <= allocationSize);
15285  }
15286  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15287 
15288  // 2. Adjust to whole block.
15289  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15290  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15291  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15292  memRange.offset += allocationOffset;
15293  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15294 
15295  break;
15296  }
15297 
15298  default:
15299  VMA_ASSERT(0);
15300  }
15301 
15302  switch(op)
15303  {
15304  case VMA_CACHE_FLUSH:
15305  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15306  break;
15307  case VMA_CACHE_INVALIDATE:
15308  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15309  break;
15310  default:
15311  VMA_ASSERT(0);
15312  }
15313  }
15314  // else: Just ignore this call.
15315 }
15316 
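/*
Editorial note (not part of the original source): a worked example of the
alignment math above, assuming nonCoherentAtomSize = 64 and a block
suballocation with offset = 100, size = 200:

    memRange.offset = VmaAlignDown(100, 64) = 64
    memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256

The range is then shifted by the allocation's offset within its VkDeviceMemory
block and clamped to the block size, so vkFlushMappedMemoryRanges /
vkInvalidateMappedMemoryRanges always receive an aligned, in-bounds
VkMappedMemoryRange, as the Vulkan spec requires for non-coherent memory.
*/
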
15317 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15318 {
15319  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15320 
15321  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15322  {
15323  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15324  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15325  VMA_ASSERT(pDedicatedAllocations);
15326  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15327  VMA_ASSERT(success);
15328  }
15329 
15330  VkDeviceMemory hMemory = allocation->GetMemory();
15331 
15332  /*
15333  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15334  before vkFreeMemory.
15335 
15336  if(allocation->GetMappedData() != VMA_NULL)
15337  {
15338  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15339  }
15340  */
15341 
15342  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15343 
15344  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15345 }
15346 
15347 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15348 {
15349  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15350  !hAllocation->CanBecomeLost() &&
15351  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15352  {
15353  void* pData = VMA_NULL;
15354  VkResult res = Map(hAllocation, &pData);
15355  if(res == VK_SUCCESS)
15356  {
15357  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15358  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15359  Unmap(hAllocation);
15360  }
15361  else
15362  {
15363  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15364  }
15365  }
15366 }
15367 
15368 #if VMA_STATS_STRING_ENABLED
15369 
15370 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15371 {
15372  bool dedicatedAllocationsStarted = false;
15373  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15374  {
15375  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15376  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15377  VMA_ASSERT(pDedicatedAllocVector);
15378  if(pDedicatedAllocVector->empty() == false)
15379  {
15380  if(dedicatedAllocationsStarted == false)
15381  {
15382  dedicatedAllocationsStarted = true;
15383  json.WriteString("DedicatedAllocations");
15384  json.BeginObject();
15385  }
15386 
15387  json.BeginString("Type ");
15388  json.ContinueString(memTypeIndex);
15389  json.EndString();
15390 
15391  json.BeginArray();
15392 
15393  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15394  {
15395  json.BeginObject(true);
15396  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15397  hAlloc->PrintParameters(json);
15398  json.EndObject();
15399  }
15400 
15401  json.EndArray();
15402  }
15403  }
15404  if(dedicatedAllocationsStarted)
15405  {
15406  json.EndObject();
15407  }
15408 
15409  {
15410  bool allocationsStarted = false;
15411  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15412  {
15413  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15414  {
15415  if(allocationsStarted == false)
15416  {
15417  allocationsStarted = true;
15418  json.WriteString("DefaultPools");
15419  json.BeginObject();
15420  }
15421 
15422  json.BeginString("Type ");
15423  json.ContinueString(memTypeIndex);
15424  json.EndString();
15425 
15426  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15427  }
15428  }
15429  if(allocationsStarted)
15430  {
15431  json.EndObject();
15432  }
15433  }
15434 
15435  // Custom pools
15436  {
15437  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15438  const size_t poolCount = m_Pools.size();
15439  if(poolCount > 0)
15440  {
15441  json.WriteString("Pools");
15442  json.BeginObject();
15443  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15444  {
15445  json.BeginString();
15446  json.ContinueString(m_Pools[poolIndex]->GetId());
15447  json.EndString();
15448 
15449  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15450  }
15451  json.EndObject();
15452  }
15453  }
15454 }
15455 
15456 #endif // #if VMA_STATS_STRING_ENABLED
15457 
15458 ////////////////////////////////////////////////////////////////////////////////
15459 // Public interface
15460 
15461 VkResult vmaCreateAllocator(
15462  const VmaAllocatorCreateInfo* pCreateInfo,
15463  VmaAllocator* pAllocator)
15464 {
15465  VMA_ASSERT(pCreateInfo && pAllocator);
15466  VMA_DEBUG_LOG("vmaCreateAllocator");
15467  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15468  return (*pAllocator)->Init(pCreateInfo);
15469 }
15470 
15471 void vmaDestroyAllocator(
15472  VmaAllocator allocator)
15473 {
15474  if(allocator != VK_NULL_HANDLE)
15475  {
15476  VMA_DEBUG_LOG("vmaDestroyAllocator");
15477  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15478  vma_delete(&allocationCallbacks, allocator);
15479  }
15480 }
15481 
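/*
Example (a sketch, not part of the original source): typical allocator
lifetime. `physicalDevice` and `device` are assumed to be valid handles
created by the application beforehand:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create and use buffers/images ...
    vmaDestroyAllocator(allocator); // Only after all allocations are freed.
*/
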
15482 void vmaGetPhysicalDeviceProperties(
15483  VmaAllocator allocator,
15484  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15485 {
15486  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15487  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15488 }
15489 
15490 void vmaGetMemoryProperties(
15491  VmaAllocator allocator,
15492  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15493 {
15494  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15495  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15496 }
15497 
15498 void vmaGetMemoryTypeProperties(
15499  VmaAllocator allocator,
15500  uint32_t memoryTypeIndex,
15501  VkMemoryPropertyFlags* pFlags)
15502 {
15503  VMA_ASSERT(allocator && pFlags);
15504  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15505  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15506 }
15507 
15508 void vmaSetCurrentFrameIndex(
15509  VmaAllocator allocator,
15510  uint32_t frameIndex)
15511 {
15512  VMA_ASSERT(allocator);
15513  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15514 
15515  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15516 
15517  allocator->SetCurrentFrameIndex(frameIndex);
15518 }
15519 
15520 void vmaCalculateStats(
15521  VmaAllocator allocator,
15522  VmaStats* pStats)
15523 {
15524  VMA_ASSERT(allocator && pStats);
15525  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15526  allocator->CalculateStats(pStats);
15527 }
15528 
15529 #if VMA_STATS_STRING_ENABLED
15530 
15531 void vmaBuildStatsString(
15532  VmaAllocator allocator,
15533  char** ppStatsString,
15534  VkBool32 detailedMap)
15535 {
15536  VMA_ASSERT(allocator && ppStatsString);
15537  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15538 
15539  VmaStringBuilder sb(allocator);
15540  {
15541  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15542  json.BeginObject();
15543 
15544  VmaStats stats;
15545  allocator->CalculateStats(&stats);
15546 
15547  json.WriteString("Total");
15548  VmaPrintStatInfo(json, stats.total);
15549 
15550  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15551  {
15552  json.BeginString("Heap ");
15553  json.ContinueString(heapIndex);
15554  json.EndString();
15555  json.BeginObject();
15556 
15557  json.WriteString("Size");
15558  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15559 
15560  json.WriteString("Flags");
15561  json.BeginArray(true);
15562  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15563  {
15564  json.WriteString("DEVICE_LOCAL");
15565  }
15566  json.EndArray();
15567 
15568  if(stats.memoryHeap[heapIndex].blockCount > 0)
15569  {
15570  json.WriteString("Stats");
15571  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15572  }
15573 
15574  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15575  {
15576  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15577  {
15578  json.BeginString("Type ");
15579  json.ContinueString(typeIndex);
15580  json.EndString();
15581 
15582  json.BeginObject();
15583 
15584  json.WriteString("Flags");
15585  json.BeginArray(true);
15586  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15587  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15588  {
15589  json.WriteString("DEVICE_LOCAL");
15590  }
15591  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15592  {
15593  json.WriteString("HOST_VISIBLE");
15594  }
15595  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15596  {
15597  json.WriteString("HOST_COHERENT");
15598  }
15599  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15600  {
15601  json.WriteString("HOST_CACHED");
15602  }
15603  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15604  {
15605  json.WriteString("LAZILY_ALLOCATED");
15606  }
15607  json.EndArray();
15608 
15609  if(stats.memoryType[typeIndex].blockCount > 0)
15610  {
15611  json.WriteString("Stats");
15612  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15613  }
15614 
15615  json.EndObject();
15616  }
15617  }
15618 
15619  json.EndObject();
15620  }
15621  if(detailedMap == VK_TRUE)
15622  {
15623  allocator->PrintDetailedMap(json);
15624  }
15625 
15626  json.EndObject();
15627  }
15628 
15629  const size_t len = sb.GetLength();
15630  char* const pChars = vma_new_array(allocator, char, len + 1);
15631  if(len > 0)
15632  {
15633  memcpy(pChars, sb.GetData(), len);
15634  }
15635  pChars[len] = '\0';
15636  *ppStatsString = pChars;
15637 }
15638 
15639 void vmaFreeStatsString(
15640  VmaAllocator allocator,
15641  char* pStatsString)
15642 {
15643  if(pStatsString != VMA_NULL)
15644  {
15645  VMA_ASSERT(allocator);
15646  size_t len = strlen(pStatsString);
15647  vma_delete_array(allocator, pStatsString, len + 1);
15648  }
15649 }
15650 
15651 #endif // #if VMA_STATS_STRING_ENABLED
15652 
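/*
Example (a sketch, not part of the original source): dumping allocator
statistics as a JSON string, e.g. to diagnose fragmentation offline:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
    // ... write statsString to a file or log ...
    vmaFreeStatsString(allocator, statsString); // Always free with the matching call.
*/
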
15653 /*
15654 This function is not protected by any mutex because it just reads immutable data.
15655 */
15656 VkResult vmaFindMemoryTypeIndex(
15657  VmaAllocator allocator,
15658  uint32_t memoryTypeBits,
15659  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15660  uint32_t* pMemoryTypeIndex)
15661 {
15662  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15663  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15664  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15665 
15666  if(pAllocationCreateInfo->memoryTypeBits != 0)
15667  {
15668  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15669  }
15670 
15671  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15672  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15673 
15674  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15675  if(mapped)
15676  {
15677  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15678  }
15679 
15680  // Convert usage to requiredFlags and preferredFlags.
15681  switch(pAllocationCreateInfo->usage)
15682  {
15683  case VMA_MEMORY_USAGE_UNKNOWN:
15684  break;
15685  case VMA_MEMORY_USAGE_GPU_ONLY:
15686  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15687  {
15688  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15689  }
15690  break;
15691  case VMA_MEMORY_USAGE_CPU_ONLY:
15692  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15693  break;
15694  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15695  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15696  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15697  {
15698  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15699  }
15700  break;
15701  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15702  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15703  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15704  break;
15705  default:
15706  break;
15707  }
15708 
15709  *pMemoryTypeIndex = UINT32_MAX;
15710  uint32_t minCost = UINT32_MAX;
15711  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15712  memTypeIndex < allocator->GetMemoryTypeCount();
15713  ++memTypeIndex, memTypeBit <<= 1)
15714  {
15715  // This memory type is acceptable according to memoryTypeBits bitmask.
15716  if((memTypeBit & memoryTypeBits) != 0)
15717  {
15718  const VkMemoryPropertyFlags currFlags =
15719  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15720  // This memory type contains requiredFlags.
15721  if((requiredFlags & ~currFlags) == 0)
15722  {
15723  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15724  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15725  // Remember memory type with lowest cost.
15726  if(currCost < minCost)
15727  {
15728  *pMemoryTypeIndex = memTypeIndex;
15729  if(currCost == 0)
15730  {
15731  return VK_SUCCESS;
15732  }
15733  minCost = currCost;
15734  }
15735  }
15736  }
15737  }
15738  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15739 }
15740 
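/*
Example (a sketch, not part of the original source): choosing a memory type for
a staging resource without creating a real buffer first:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator,
        UINT32_MAX, // Accept any memory type; the function narrows the set down.
        &allocCreateInfo,
        &memTypeIndex);
    // VK_ERROR_FEATURE_NOT_PRESENT means no memory type satisfies requiredFlags.
*/
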
15741 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15742  VmaAllocator allocator,
15743  const VkBufferCreateInfo* pBufferCreateInfo,
15744  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15745  uint32_t* pMemoryTypeIndex)
15746 {
15747  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15748  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15749  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15750  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15751 
15752  const VkDevice hDev = allocator->m_hDevice;
15753  VkBuffer hBuffer = VK_NULL_HANDLE;
15754  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15755  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15756  if(res == VK_SUCCESS)
15757  {
15758  VkMemoryRequirements memReq = {};
15759  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15760  hDev, hBuffer, &memReq);
15761 
15762  res = vmaFindMemoryTypeIndex(
15763  allocator,
15764  memReq.memoryTypeBits,
15765  pAllocationCreateInfo,
15766  pMemoryTypeIndex);
15767 
15768  allocator->GetVulkanFunctions().vkDestroyBuffer(
15769  hDev, hBuffer, allocator->GetAllocationCallbacks());
15770  }
15771  return res;
15772 }
15773 
15774 VkResult vmaFindMemoryTypeIndexForImageInfo(
15775  VmaAllocator allocator,
15776  const VkImageCreateInfo* pImageCreateInfo,
15777  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15778  uint32_t* pMemoryTypeIndex)
15779 {
15780  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15781  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15782  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15783  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15784 
15785  const VkDevice hDev = allocator->m_hDevice;
15786  VkImage hImage = VK_NULL_HANDLE;
15787  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15788  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15789  if(res == VK_SUCCESS)
15790  {
15791  VkMemoryRequirements memReq = {};
15792  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15793  hDev, hImage, &memReq);
15794 
15795  res = vmaFindMemoryTypeIndex(
15796  allocator,
15797  memReq.memoryTypeBits,
15798  pAllocationCreateInfo,
15799  pMemoryTypeIndex);
15800 
15801  allocator->GetVulkanFunctions().vkDestroyImage(
15802  hDev, hImage, allocator->GetAllocationCallbacks());
15803  }
15804  return res;
15805 }
15806 
15807 VkResult vmaCreatePool(
15808  VmaAllocator allocator,
15809  const VmaPoolCreateInfo* pCreateInfo,
15810  VmaPool* pPool)
15811 {
15812  VMA_ASSERT(allocator && pCreateInfo && pPool);
15813 
15814  VMA_DEBUG_LOG("vmaCreatePool");
15815 
15816  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15817 
15818  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15819 
15820 #if VMA_RECORDING_ENABLED
15821  if(allocator->GetRecorder() != VMA_NULL)
15822  {
15823  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15824  }
15825 #endif
15826 
15827  return res;
15828 }
15829 
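/*
Example (a sketch, not part of the original source): creating a custom pool
from a memory type found beforehand, e.g. with vmaFindMemoryTypeIndex():

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block.
    poolCreateInfo.maxBlockCount = 2; // Caps the pool at 256 MiB.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool); // Only after its allocations are freed.
*/
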
15830 void vmaDestroyPool(
15831  VmaAllocator allocator,
15832  VmaPool pool)
15833 {
15834  VMA_ASSERT(allocator);
15835 
15836  if(pool == VK_NULL_HANDLE)
15837  {
15838  return;
15839  }
15840 
15841  VMA_DEBUG_LOG("vmaDestroyPool");
15842 
15843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15844 
15845 #if VMA_RECORDING_ENABLED
15846  if(allocator->GetRecorder() != VMA_NULL)
15847  {
15848  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15849  }
15850 #endif
15851 
15852  allocator->DestroyPool(pool);
15853 }
15854 
15855 void vmaGetPoolStats(
15856  VmaAllocator allocator,
15857  VmaPool pool,
15858  VmaPoolStats* pPoolStats)
15859 {
15860  VMA_ASSERT(allocator && pool && pPoolStats);
15861 
15862  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15863 
15864  allocator->GetPoolStats(pool, pPoolStats);
15865 }
15866 
15867 void vmaMakePoolAllocationsLost(
15868  VmaAllocator allocator,
15869  VmaPool pool,
15870  size_t* pLostAllocationCount)
15871 {
15872  VMA_ASSERT(allocator && pool);
15873 
15874  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15875 
15876 #if VMA_RECORDING_ENABLED
15877  if(allocator->GetRecorder() != VMA_NULL)
15878  {
15879  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15880  }
15881 #endif
15882 
15883  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15884 }
15885 
15886 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15887 {
15888  VMA_ASSERT(allocator && pool);
15889 
15890  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15891 
15892  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15893 
15894  return allocator->CheckPoolCorruption(pool);
15895 }
15896 
15897 VkResult vmaAllocateMemory(
15898  VmaAllocator allocator,
15899  const VkMemoryRequirements* pVkMemoryRequirements,
15900  const VmaAllocationCreateInfo* pCreateInfo,
15901  VmaAllocation* pAllocation,
15902  VmaAllocationInfo* pAllocationInfo)
15903 {
15904  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15905 
15906  VMA_DEBUG_LOG("vmaAllocateMemory");
15907 
15908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15909 
15910  VkResult result = allocator->AllocateMemory(
15911  *pVkMemoryRequirements,
15912  false, // requiresDedicatedAllocation
15913  false, // prefersDedicatedAllocation
15914  VK_NULL_HANDLE, // dedicatedBuffer
15915  VK_NULL_HANDLE, // dedicatedImage
15916  *pCreateInfo,
15917  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15918  1, // allocationCount
15919  pAllocation);
15920 
15921 #if VMA_RECORDING_ENABLED
15922  if(allocator->GetRecorder() != VMA_NULL)
15923  {
15924  allocator->GetRecorder()->RecordAllocateMemory(
15925  allocator->GetCurrentFrameIndex(),
15926  *pVkMemoryRequirements,
15927  *pCreateInfo,
15928  *pAllocation);
15929  }
15930 #endif
15931 
15932  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15933  {
15934  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15935  }
15936 
15937  return result;
15938 }
15939 
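/*
Example (a sketch, not part of the original source): allocating memory for an
existing VkBuffer by hand. vmaAllocateMemoryForBuffer() below does the same
and additionally queries dedicated-allocation preferences:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, alloc, buffer);
    }
*/
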
15940 VkResult vmaAllocateMemoryPages(
15941  VmaAllocator allocator,
15942  const VkMemoryRequirements* pVkMemoryRequirements,
15943  const VmaAllocationCreateInfo* pCreateInfo,
15944  size_t allocationCount,
15945  VmaAllocation* pAllocations,
15946  VmaAllocationInfo* pAllocationInfo)
15947 {
15948  if(allocationCount == 0)
15949  {
15950  return VK_SUCCESS;
15951  }
15952 
15953  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15954 
15955  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15956 
15957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15958 
15959  VkResult result = allocator->AllocateMemory(
15960  *pVkMemoryRequirements,
15961  false, // requiresDedicatedAllocation
15962  false, // prefersDedicatedAllocation
15963  VK_NULL_HANDLE, // dedicatedBuffer
15964  VK_NULL_HANDLE, // dedicatedImage
15965  *pCreateInfo,
15966  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15967  allocationCount,
15968  pAllocations);
15969 
15970 #if VMA_RECORDING_ENABLED
15971  if(allocator->GetRecorder() != VMA_NULL)
15972  {
15973  allocator->GetRecorder()->RecordAllocateMemoryPages(
15974  allocator->GetCurrentFrameIndex(),
15975  *pVkMemoryRequirements,
15976  *pCreateInfo,
15977  (uint64_t)allocationCount,
15978  pAllocations);
15979  }
15980 #endif
15981 
15982  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15983  {
15984  for(size_t i = 0; i < allocationCount; ++i)
15985  {
15986  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
15987  }
15988  }
15989 
15990  return result;
15991 }
15992 
15993 VkResult vmaAllocateMemoryForBuffer(
15994  VmaAllocator allocator,
15995  VkBuffer buffer,
15996  const VmaAllocationCreateInfo* pCreateInfo,
15997  VmaAllocation* pAllocation,
15998  VmaAllocationInfo* pAllocationInfo)
15999 {
16000  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16001 
16002  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16003 
16004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16005 
16006  VkMemoryRequirements vkMemReq = {};
16007  bool requiresDedicatedAllocation = false;
16008  bool prefersDedicatedAllocation = false;
16009  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16010  requiresDedicatedAllocation,
16011  prefersDedicatedAllocation);
16012 
16013  VkResult result = allocator->AllocateMemory(
16014  vkMemReq,
16015  requiresDedicatedAllocation,
16016  prefersDedicatedAllocation,
16017  buffer, // dedicatedBuffer
16018  VK_NULL_HANDLE, // dedicatedImage
16019  *pCreateInfo,
16020  VMA_SUBALLOCATION_TYPE_BUFFER,
16021  1, // allocationCount
16022  pAllocation);
16023 
16024 #if VMA_RECORDING_ENABLED
16025  if(allocator->GetRecorder() != VMA_NULL)
16026  {
16027  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16028  allocator->GetCurrentFrameIndex(),
16029  vkMemReq,
16030  requiresDedicatedAllocation,
16031  prefersDedicatedAllocation,
16032  *pCreateInfo,
16033  *pAllocation);
16034  }
16035 #endif
16036 
16037  if(pAllocationInfo && result == VK_SUCCESS)
16038  {
16039  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16040  }
16041 
16042  return result;
16043 }
16044 
16045 VkResult vmaAllocateMemoryForImage(
16046  VmaAllocator allocator,
16047  VkImage image,
16048  const VmaAllocationCreateInfo* pCreateInfo,
16049  VmaAllocation* pAllocation,
16050  VmaAllocationInfo* pAllocationInfo)
16051 {
16052  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16053 
16054  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16055 
16056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16057 
16058  VkMemoryRequirements vkMemReq = {};
16059  bool requiresDedicatedAllocation = false;
16060  bool prefersDedicatedAllocation = false;
16061  allocator->GetImageMemoryRequirements(image, vkMemReq,
16062  requiresDedicatedAllocation, prefersDedicatedAllocation);
16063 
16064  VkResult result = allocator->AllocateMemory(
16065  vkMemReq,
16066  requiresDedicatedAllocation,
16067  prefersDedicatedAllocation,
16068  VK_NULL_HANDLE, // dedicatedBuffer
16069  image, // dedicatedImage
16070  *pCreateInfo,
16071  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16072  1, // allocationCount
16073  pAllocation);
16074 
16075 #if VMA_RECORDING_ENABLED
16076  if(allocator->GetRecorder() != VMA_NULL)
16077  {
16078  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16079  allocator->GetCurrentFrameIndex(),
16080  vkMemReq,
16081  requiresDedicatedAllocation,
16082  prefersDedicatedAllocation,
16083  *pCreateInfo,
16084  *pAllocation);
16085  }
16086 #endif
16087 
16088  if(pAllocationInfo && result == VK_SUCCESS)
16089  {
16090  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16091  }
16092 
16093  return result;
16094 }
16095 
16096 void vmaFreeMemory(
16097  VmaAllocator allocator,
16098  VmaAllocation allocation)
16099 {
16100  VMA_ASSERT(allocator);
16101 
16102  if(allocation == VK_NULL_HANDLE)
16103  {
16104  return;
16105  }
16106 
16107  VMA_DEBUG_LOG("vmaFreeMemory");
16108 
16109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16110 
16111 #if VMA_RECORDING_ENABLED
16112  if(allocator->GetRecorder() != VMA_NULL)
16113  {
16114  allocator->GetRecorder()->RecordFreeMemory(
16115  allocator->GetCurrentFrameIndex(),
16116  allocation);
16117  }
16118 #endif
16119 
16120  allocator->FreeMemory(
16121  1, // allocationCount
16122  &allocation);
16123 }
16124 
16125 void vmaFreeMemoryPages(
16126  VmaAllocator allocator,
16127  size_t allocationCount,
16128  VmaAllocation* pAllocations)
16129 {
16130  if(allocationCount == 0)
16131  {
16132  return;
16133  }
16134 
16135  VMA_ASSERT(allocator);
16136 
16137  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16138 
16139  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16140 
16141 #if VMA_RECORDING_ENABLED
16142  if(allocator->GetRecorder() != VMA_NULL)
16143  {
16144  allocator->GetRecorder()->RecordFreeMemoryPages(
16145  allocator->GetCurrentFrameIndex(),
16146  (uint64_t)allocationCount,
16147  pAllocations);
16148  }
16149 #endif
16150 
16151  allocator->FreeMemory(allocationCount, pAllocations);
16152 }
16153 
16154 VkResult vmaResizeAllocation(
16155  VmaAllocator allocator,
16156  VmaAllocation allocation,
16157  VkDeviceSize newSize)
16158 {
16159  VMA_ASSERT(allocator && allocation);
16160 
16161  VMA_DEBUG_LOG("vmaResizeAllocation");
16162 
16163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16164 
16165 #if VMA_RECORDING_ENABLED
16166  if(allocator->GetRecorder() != VMA_NULL)
16167  {
16168  allocator->GetRecorder()->RecordResizeAllocation(
16169  allocator->GetCurrentFrameIndex(),
16170  allocation,
16171  newSize);
16172  }
16173 #endif
16174 
16175  return allocator->ResizeAllocation(allocation, newSize);
16176 }
16177 
16178 void vmaGetAllocationInfo(
16179  VmaAllocator allocator,
16180  VmaAllocation allocation,
16181  VmaAllocationInfo* pAllocationInfo)
16182 {
16183  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16184 
16185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16186 
16187 #if VMA_RECORDING_ENABLED
16188  if(allocator->GetRecorder() != VMA_NULL)
16189  {
16190  allocator->GetRecorder()->RecordGetAllocationInfo(
16191  allocator->GetCurrentFrameIndex(),
16192  allocation);
16193  }
16194 #endif
16195 
16196  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16197 }
16198 
16199 VkBool32 vmaTouchAllocation(
16200  VmaAllocator allocator,
16201  VmaAllocation allocation)
16202 {
16203  VMA_ASSERT(allocator && allocation);
16204 
16205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16206 
16207 #if VMA_RECORDING_ENABLED
16208  if(allocator->GetRecorder() != VMA_NULL)
16209  {
16210  allocator->GetRecorder()->RecordTouchAllocation(
16211  allocator->GetCurrentFrameIndex(),
16212  allocation);
16213  }
16214 #endif
16215 
16216  return allocator->TouchAllocation(allocation);
16217 }
16218 
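/*
Example (a sketch, not part of the original source): the typical per-frame
pattern for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation became lost: destroy the resource and recreate it.
    }
*/
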
16219 void vmaSetAllocationUserData(
16220  VmaAllocator allocator,
16221  VmaAllocation allocation,
16222  void* pUserData)
16223 {
16224  VMA_ASSERT(allocator && allocation);
16225 
16226  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16227 
16228  allocation->SetUserData(allocator, pUserData);
16229 
16230 #if VMA_RECORDING_ENABLED
16231  if(allocator->GetRecorder() != VMA_NULL)
16232  {
16233  allocator->GetRecorder()->RecordSetAllocationUserData(
16234  allocator->GetCurrentFrameIndex(),
16235  allocation,
16236  pUserData);
16237  }
16238 #endif
16239 }
16240 
16241 void vmaCreateLostAllocation(
16242  VmaAllocator allocator,
16243  VmaAllocation* pAllocation)
16244 {
16245  VMA_ASSERT(allocator && pAllocation);
16246 
16247  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16248 
16249  allocator->CreateLostAllocation(pAllocation);
16250 
16251 #if VMA_RECORDING_ENABLED
16252  if(allocator->GetRecorder() != VMA_NULL)
16253  {
16254  allocator->GetRecorder()->RecordCreateLostAllocation(
16255  allocator->GetCurrentFrameIndex(),
16256  *pAllocation);
16257  }
16258 #endif
16259 }
16260 
16261 VkResult vmaMapMemory(
16262  VmaAllocator allocator,
16263  VmaAllocation allocation,
16264  void** ppData)
16265 {
16266  VMA_ASSERT(allocator && allocation && ppData);
16267 
16268  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16269 
16270  VkResult res = allocator->Map(allocation, ppData);
16271 
16272 #if VMA_RECORDING_ENABLED
16273  if(allocator->GetRecorder() != VMA_NULL)
16274  {
16275  allocator->GetRecorder()->RecordMapMemory(
16276  allocator->GetCurrentFrameIndex(),
16277  allocation);
16278  }
16279 #endif
16280 
16281  return res;
16282 }
16283 
16284 void vmaUnmapMemory(
16285  VmaAllocator allocator,
16286  VmaAllocation allocation)
16287 {
16288  VMA_ASSERT(allocator && allocation);
16289 
16290  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16291 
16292 #if VMA_RECORDING_ENABLED
16293  if(allocator->GetRecorder() != VMA_NULL)
16294  {
16295  allocator->GetRecorder()->RecordUnmapMemory(
16296  allocator->GetCurrentFrameIndex(),
16297  allocation);
16298  }
16299 #endif
16300 
16301  allocator->Unmap(allocation);
16302 }
16303 
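/*
Example (a sketch, not part of the original source): uploading data through a
mapped pointer. The allocation must come from HOST_VISIBLE memory and, as
Map() above enforces, must not be able to become lost. `srcData` and
`dataSize` are assumed to be provided by the application:

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, alloc, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, (size_t)dataSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/
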
16304 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16305 {
16306  VMA_ASSERT(allocator && allocation);
16307 
16308  VMA_DEBUG_LOG("vmaFlushAllocation");
16309 
16310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16311 
16312  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16313 
16314 #if VMA_RECORDING_ENABLED
16315  if(allocator->GetRecorder() != VMA_NULL)
16316  {
16317  allocator->GetRecorder()->RecordFlushAllocation(
16318  allocator->GetCurrentFrameIndex(),
16319  allocation, offset, size);
16320  }
16321 #endif
16322 }
16323 
16324 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16325 {
16326  VMA_ASSERT(allocator && allocation);
16327 
16328  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16329 
16330  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16331 
16332  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16333 
16334 #if VMA_RECORDING_ENABLED
16335  if(allocator->GetRecorder() != VMA_NULL)
16336  {
16337  allocator->GetRecorder()->RecordInvalidateAllocation(
16338  allocator->GetCurrentFrameIndex(),
16339  allocation, offset, size);
16340  }
16341 #endif
16342 }
16343 
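/*
Example (a sketch, not part of the original source): these calls matter only
for memory types lacking VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; for coherent
memory, FlushOrInvalidateAllocation() above turns them into no-ops:

    // After writing through a mapped pointer:
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
    // Before reading through a mapped pointer:
    vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/
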
16344 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16345 {
16346  VMA_ASSERT(allocator);
16347 
16348  VMA_DEBUG_LOG("vmaCheckCorruption");
16349 
16350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16351 
16352  return allocator->CheckCorruption(memoryTypeBits);
16353 }
16354 
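/*
Example (a sketch, not part of the original source): validating allocation
margins across all memory types. This assumes the library was compiled with
VMA_DEBUG_DETECT_CORRUPTION enabled and a nonzero VMA_DEBUG_MARGIN:

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // VK_SUCCESS: at least one memory type was checked, no corruption found.
    // VK_ERROR_FEATURE_NOT_PRESENT: no memory type could be checked.
*/
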
16355 VkResult vmaDefragment(
16356  VmaAllocator allocator,
16357  VmaAllocation* pAllocations,
16358  size_t allocationCount,
16359  VkBool32* pAllocationsChanged,
16360  const VmaDefragmentationInfo *pDefragmentationInfo,
16361  VmaDefragmentationStats* pDefragmentationStats)
16362 {
16363  // Deprecated interface, reimplemented using the new one.
16364 
16365  VmaDefragmentationInfo2 info2 = {};
16366  info2.allocationCount = (uint32_t)allocationCount;
16367  info2.pAllocations = pAllocations;
16368  info2.pAllocationsChanged = pAllocationsChanged;
16369  if(pDefragmentationInfo != VMA_NULL)
16370  {
16371  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16372  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16373  }
16374  else
16375  {
16376  info2.maxCpuAllocationsToMove = UINT32_MAX;
16377  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16378  }
16379  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16380 
16381  VmaDefragmentationContext ctx;
16382  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16383  if(res == VK_NOT_READY)
16384  {
16385  res = vmaDefragmentationEnd( allocator, ctx);
16386  }
16387  return res;
16388 }
16389 
16390 VkResult vmaDefragmentationBegin(
16391  VmaAllocator allocator,
16392  const VmaDefragmentationInfo2* pInfo,
16393  VmaDefragmentationStats* pStats,
16394  VmaDefragmentationContext *pContext)
16395 {
16396  VMA_ASSERT(allocator && pInfo && pContext);
16397 
16398  // Degenerate case: Nothing to defragment.
16399  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16400  {
16401  return VK_SUCCESS;
16402  }
16403 
16404  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16405  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16406  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16407  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16408 
16409  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16410 
16411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16412 
16413  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16414 
16415 #if VMA_RECORDING_ENABLED
16416  if(allocator->GetRecorder() != VMA_NULL)
16417  {
16418  allocator->GetRecorder()->RecordDefragmentationBegin(
16419  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16420  }
16421 #endif
16422 
16423  return res;
16424 }
16425 
16426 VkResult vmaDefragmentationEnd(
16427  VmaAllocator allocator,
16428  VmaDefragmentationContext context)
16429 {
16430  VMA_ASSERT(allocator);
16431 
16432  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16433 
16434  if(context != VK_NULL_HANDLE)
16435  {
16436  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16437 
16438 #if VMA_RECORDING_ENABLED
16439  if(allocator->GetRecorder() != VMA_NULL)
16440  {
16441  allocator->GetRecorder()->RecordDefragmentationEnd(
16442  allocator->GetCurrentFrameIndex(), context);
16443  }
16444 #endif
16445 
16446  return allocator->DefragmentationEnd(context);
16447  }
16448  else
16449  {
16450  return VK_SUCCESS;
16451  }
16452 }
16453 
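/*
Example (a sketch, not part of the original source): CPU-side defragmentation
of allocations that are known not to be in use by the GPU (`allocs` and
`allocCount` are assumed to be the application's):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // On VK_SUCCESS the context is already VK_NULL_HANDLE, so calling End
    // unconditionally is safe; on VK_NOT_READY it performs the cleanup.
    vmaDefragmentationEnd(allocator, defragCtx);
*/
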
16454 VkResult vmaBindBufferMemory(
16455  VmaAllocator allocator,
16456  VmaAllocation allocation,
16457  VkBuffer buffer)
16458 {
16459  VMA_ASSERT(allocator && allocation && buffer);
16460 
16461  VMA_DEBUG_LOG("vmaBindBufferMemory");
16462 
16463  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16464 
16465  return allocator->BindBufferMemory(allocation, buffer);
16466 }
16467 
16468 VkResult vmaBindImageMemory(
16469  VmaAllocator allocator,
16470  VmaAllocation allocation,
16471  VkImage image)
16472 {
16473  VMA_ASSERT(allocator && allocation && image);
16474 
16475  VMA_DEBUG_LOG("vmaBindImageMemory");
16476 
16477  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16478 
16479  return allocator->BindImageMemory(allocation, image);
16480 }
16481 
16482 VkResult vmaCreateBuffer(
16483  VmaAllocator allocator,
16484  const VkBufferCreateInfo* pBufferCreateInfo,
16485  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16486  VkBuffer* pBuffer,
16487  VmaAllocation* pAllocation,
16488  VmaAllocationInfo* pAllocationInfo)
16489 {
16490  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16491 
16492  if(pBufferCreateInfo->size == 0)
16493  {
16494  return VK_ERROR_VALIDATION_FAILED_EXT;
16495  }
16496 
16497  VMA_DEBUG_LOG("vmaCreateBuffer");
16498 
16499  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16500 
16501  *pBuffer = VK_NULL_HANDLE;
16502  *pAllocation = VK_NULL_HANDLE;
16503 
16504  // 1. Create VkBuffer.
16505  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16506  allocator->m_hDevice,
16507  pBufferCreateInfo,
16508  allocator->GetAllocationCallbacks(),
16509  pBuffer);
16510  if(res >= 0)
16511  {
16512  // 2. vkGetBufferMemoryRequirements.
16513  VkMemoryRequirements vkMemReq = {};
16514  bool requiresDedicatedAllocation = false;
16515  bool prefersDedicatedAllocation = false;
16516  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16517  requiresDedicatedAllocation, prefersDedicatedAllocation);
16518 
16519  // Make sure alignment requirements for specific buffer usages reported
16520  // in Physical Device Properties are included in alignment reported by memory requirements.
16521  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16522  {
16523  VMA_ASSERT(vkMemReq.alignment %
16524  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16525  }
16526  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16527  {
16528  VMA_ASSERT(vkMemReq.alignment %
16529  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16530  }
16531  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16532  {
16533  VMA_ASSERT(vkMemReq.alignment %
16534  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16535  }
16536 
16537  // 3. Allocate memory using allocator.
16538  res = allocator->AllocateMemory(
16539  vkMemReq,
16540  requiresDedicatedAllocation,
16541  prefersDedicatedAllocation,
16542  *pBuffer, // dedicatedBuffer
16543  VK_NULL_HANDLE, // dedicatedImage
16544  *pAllocationCreateInfo,
16545  VMA_SUBALLOCATION_TYPE_BUFFER,
16546  1, // allocationCount
16547  pAllocation);
16548 
16549 #if VMA_RECORDING_ENABLED
16550  if(allocator->GetRecorder() != VMA_NULL)
16551  {
16552  allocator->GetRecorder()->RecordCreateBuffer(
16553  allocator->GetCurrentFrameIndex(),
16554  *pBufferCreateInfo,
16555  *pAllocationCreateInfo,
16556  *pAllocation);
16557  }
16558 #endif
16559 
16560  if(res >= 0)
16561  {
16562  // 4. Bind buffer with memory.
16563  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16564  if(res >= 0)
16565  {
16566  // All steps succeeded.
16567  #if VMA_STATS_STRING_ENABLED
16568  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16569  #endif
16570  if(pAllocationInfo != VMA_NULL)
16571  {
16572  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16573  }
16574 
16575  return VK_SUCCESS;
16576  }
16577  allocator->FreeMemory(
16578  1, // allocationCount
16579  pAllocation);
16580  *pAllocation = VK_NULL_HANDLE;
16581  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16582  *pBuffer = VK_NULL_HANDLE;
16583  return res;
16584  }
16585  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16586  *pBuffer = VK_NULL_HANDLE;
16587  return res;
16588  }
16589  return res;
16590 }
16591 
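/*
Example (a sketch, not part of the original source): the common one-call path
implemented by the function above:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buf, alloc); // Destroys the buffer and frees its memory together.
*/
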
16592 void vmaDestroyBuffer(
16593  VmaAllocator allocator,
16594  VkBuffer buffer,
16595  VmaAllocation allocation)
16596 {
16597  VMA_ASSERT(allocator);
16598 
16599  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16600  {
16601  return;
16602  }
16603 
16604  VMA_DEBUG_LOG("vmaDestroyBuffer");
16605 
16606  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16607 
16608 #if VMA_RECORDING_ENABLED
16609  if(allocator->GetRecorder() != VMA_NULL)
16610  {
16611  allocator->GetRecorder()->RecordDestroyBuffer(
16612  allocator->GetCurrentFrameIndex(),
16613  allocation);
16614  }
16615 #endif
16616 
16617  if(buffer != VK_NULL_HANDLE)
16618  {
16619  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16620  }
16621 
16622  if(allocation != VK_NULL_HANDLE)
16623  {
16624  allocator->FreeMemory(
16625  1, // allocationCount
16626  &allocation);
16627  }
16628 }
16629 
16630 VkResult vmaCreateImage(
16631  VmaAllocator allocator,
16632  const VkImageCreateInfo* pImageCreateInfo,
16633  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16634  VkImage* pImage,
16635  VmaAllocation* pAllocation,
16636  VmaAllocationInfo* pAllocationInfo)
16637 {
16638  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16639 
16640  if(pImageCreateInfo->extent.width == 0 ||
16641  pImageCreateInfo->extent.height == 0 ||
16642  pImageCreateInfo->extent.depth == 0 ||
16643  pImageCreateInfo->mipLevels == 0 ||
16644  pImageCreateInfo->arrayLayers == 0)
16645  {
16646  return VK_ERROR_VALIDATION_FAILED_EXT;
16647  }
16648 
16649  VMA_DEBUG_LOG("vmaCreateImage");
16650 
16651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16652 
16653  *pImage = VK_NULL_HANDLE;
16654  *pAllocation = VK_NULL_HANDLE;
16655 
16656  // 1. Create VkImage.
16657  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16658  allocator->m_hDevice,
16659  pImageCreateInfo,
16660  allocator->GetAllocationCallbacks(),
16661  pImage);
16662  if(res >= 0)
16663  {
16664  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16665  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16666  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16667 
16668  // 2. Allocate memory using allocator.
16669  VkMemoryRequirements vkMemReq = {};
16670  bool requiresDedicatedAllocation = false;
16671  bool prefersDedicatedAllocation = false;
16672  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16673  requiresDedicatedAllocation, prefersDedicatedAllocation);
16674 
16675  res = allocator->AllocateMemory(
16676  vkMemReq,
16677  requiresDedicatedAllocation,
16678  prefersDedicatedAllocation,
16679  VK_NULL_HANDLE, // dedicatedBuffer
16680  *pImage, // dedicatedImage
16681  *pAllocationCreateInfo,
16682  suballocType,
16683  1, // allocationCount
16684  pAllocation);
16685 
16686 #if VMA_RECORDING_ENABLED
16687  if(allocator->GetRecorder() != VMA_NULL)
16688  {
16689  allocator->GetRecorder()->RecordCreateImage(
16690  allocator->GetCurrentFrameIndex(),
16691  *pImageCreateInfo,
16692  *pAllocationCreateInfo,
16693  *pAllocation);
16694  }
16695 #endif
16696 
16697  if(res >= 0)
16698  {
16699  // 3. Bind image with memory.
16700  res = allocator->BindImageMemory(*pAllocation, *pImage);
16701  if(res >= 0)
16702  {
16703  // All steps succeeded.
16704  #if VMA_STATS_STRING_ENABLED
16705  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16706  #endif
16707  if(pAllocationInfo != VMA_NULL)
16708  {
16709  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16710  }
16711 
16712  return VK_SUCCESS;
16713  }
16714  allocator->FreeMemory(
16715  1, // allocationCount
16716  pAllocation);
16717  *pAllocation = VK_NULL_HANDLE;
16718  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16719  *pImage = VK_NULL_HANDLE;
16720  return res;
16721  }
16722  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16723  *pImage = VK_NULL_HANDLE;
16724  return res;
16725  }
16726  return res;
16727 }
16728 
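/*
Example (a sketch, not part of the original source): creating a sampled 2D
texture together with its memory in one call:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 512, 512, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage img;
    VmaAllocation alloc;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, img, alloc);
*/
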
16729 void vmaDestroyImage(
16730  VmaAllocator allocator,
16731  VkImage image,
16732  VmaAllocation allocation)
16733 {
16734  VMA_ASSERT(allocator);
16735 
16736  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16737  {
16738  return;
16739  }
16740 
16741  VMA_DEBUG_LOG("vmaDestroyImage");
16742 
16743  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16744 
16745 #if VMA_RECORDING_ENABLED
16746  if(allocator->GetRecorder() != VMA_NULL)
16747  {
16748  allocator->GetRecorder()->RecordDestroyImage(
16749  allocator->GetCurrentFrameIndex(),
16750  allocation);
16751  }
16752 #endif
16753 
16754  if(image != VK_NULL_HANDLE)
16755  {
16756  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16757  }
16758  if(allocation != VK_NULL_HANDLE)
16759  {
16760  allocator->FreeMemory(
16761  1, // allocationCount
16762  &allocation);
16763  }
16764 }
16765 
16766 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:1658
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1784
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2282
Definition: vk_mem_alloc.h:2276
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1739
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1851
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2461
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1728
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1753
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2160
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2302
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2338
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1714
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2285
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2828
VmaMemoryUsage
Definition: vk_mem_alloc.h:1961
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2788
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2849
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2867
Definition: vk_mem_alloc.h:2000
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2147
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1731
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1921
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1664
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2767
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2765
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2794
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1685
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1757
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1690
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2869
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2134
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2348
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1724
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1904
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2297
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1677
Definition: vk_mem_alloc.h:2272
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2070
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1917
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1681
Definition: vk_mem_alloc.h:2097
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2288
Definition: vk_mem_alloc.h:2009
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1730
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2129
Definition: vk_mem_alloc.h:2120
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1907
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1726
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2310
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1793
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2341
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2118
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2818
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2153
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1832
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1923
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:2050
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1916
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1737
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1763
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use...
Definition: vk_mem_alloc.h:2764
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2842
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1679
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1736
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2324
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1729
Definition: vk_mem_alloc.h:2081
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1771
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2475
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1787
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1916
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1913
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2329
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2773
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:2090
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2456
Definition: vk_mem_alloc.h:2104
Definition: vk_mem_alloc.h:2116
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2865
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1722
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1911
Definition: vk_mem_alloc.h:1966
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2278
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1760
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1909
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1734
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1738
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2037
Definition: vk_mem_alloc.h:2111
Definition: vk_mem_alloc.h:1993
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2470
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1712
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1725
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2257
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2437
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2101
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2222
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1917
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1747
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1924
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2335
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1917
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side...
Definition: vk_mem_alloc.h:2833
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2442
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:2797