//
// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif
/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    /// Allocator and all objects created from it will not be synchronized internally,
    /// so you must guarantee they are used from only one thread at a time or are synchronized externally by you.
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /// Enables usage of the VK_KHR_dedicated_allocation extension.
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

/// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    /// Enables flush after recording every function call.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    /// Flags for recording. Use VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* pFilePath;
} VmaRecordSettings;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout whole lifetime of created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout whole lifetime of created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

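/*
Example (an illustrative sketch, not part of the original header): creating and
destroying an allocator; `physicalDevice` and `device` are assumed to have been
created by the application beforehand:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
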
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from current state of Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only; fast access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on host; usually used for CPU-side staging data.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on host and cached; good for data written by GPU and read back on CPU.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own dedicated VkDeviceMemory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing memory blocks and never create a new block.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /// Allocation created with this flag can become lost as a result of another allocation.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating allocation using this flag, other allocations that were created with
    /// flag VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    /// Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
    /// null-terminated string and make a local copy of it.
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    /// Allocation will be created from the upper stack in a double stack pool.
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,

    /// Allocation strategy that chooses smallest possible free range for the allocation.
    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
    /// Allocation strategy that chooses biggest possible free range for the allocation.
    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
    /// Allocation strategy that chooses first suitable free range for the allocation.
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,

    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,

    /// A bit mask to extract only STRATEGY bits from entire set of flags.
    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Can be left VMA_MEMORY_USAGE_UNKNOWN if you specify requirements in another way.
    VmaMemoryUsage usage;
    /// Flags that must be set in a Memory Type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in VmaAllocation and can be read as VmaAllocationInfo::pUserData.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

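/*
Usage sketch (illustrative, not part of the original): finding a memory type for
host-visible staging data; passing UINT32_MAX as memoryTypeBits places no
restriction on acceptable memory types, and `allocator` is assumed to exist:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/
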
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    /// Use this flag if you always allocate only buffers and linear images or only optimal images
    /// out of this pool, so buffer-image granularity can be ignored.
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    /// Enables alternative, linear allocation algorithm in this pool.
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    /// Enables alternative, buddy allocation algorithm in this pool.
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    /// Bit mask to extract only ALGORITHM bits from entire set of flags.
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

/// Describes parameter of created VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameter of existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region available for new allocation.
    VkDeviceSize unusedRangeSizeMax;
    /// Number of VkDeviceMemory blocks allocated for this pool.
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

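/*
Example (illustrative, not part of the original): creating a custom pool for a
memory type index found earlier, e.g. with vmaFindMemoryTypeIndex(); the block
size and count below are hypothetical values:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from it by setting VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/
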
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation object that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if not mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

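/*
Example (illustrative, not part of the original): uploading CPU data to an
allocation created in a host-visible memory type; `allocator`, `allocation`,
`myData`, and `myDataSize` are assumed to exist:

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, myData, myDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
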
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

/// Flags to be used in vmaDefragmentationBegin().
typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

/// Parameters for defragmentation, to be used with function vmaDefragmentationBegin().
typedef struct VmaDefragmentationInfo2 {
    /// Reserved for future use. Should be 0.
    VmaDefragmentationFlags flags;
    /// Number of allocations in pAllocations array.
    uint32_t allocationCount;
    /// Pointer to array of allocations that can be defragmented.
    VmaAllocation* pAllocations;
    /// Optional, output. Pointer to array that will be filled with information whether the allocation at same index has been changed during defragmentation.
    VkBool32* pAllocationsChanged;
    /// Number of pools in pPools array.
    uint32_t poolCount;
    /// Either null or pointer to array of pools to be defragmented.
    VmaPool* pPools;
    /// Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side.
    VkDeviceSize maxCpuBytesToMove;
    /// Maximum number of allocations that can be moved to a different place using transfers on CPU side.
    uint32_t maxCpuAllocationsToMove;
    /// Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side.
    VkDeviceSize maxGpuBytesToMove;
    /// Maximum number of allocations that can be moved to a different place using transfers on GPU side.
    uint32_t maxGpuAllocationsToMove;
    /// Optional. Command buffer where GPU copy commands will be posted.
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

/// Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to different place.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext* pContext);

VkResult vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

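/*
Sketch of the CPU-side defragmentation flow (illustrative, not part of the
original); `allocations` is an application-provided array of `allocCount`
allocations that are safe to move, with their buffers/images unbound beforehand:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must be recreated and rebound afterwards.
*/
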
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

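/*
Example (illustrative, not part of the original): creating a buffer together
with its own memory allocation in a single call:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
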
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/
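
/*
For reference, the intended single-header usage pattern: in exactly one .cpp
file define the implementation macro before the #include; all other files just
#include the header normally.

    // In exactly one .cpp file:
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/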

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>
#include <atomic> // for std::atomic

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32)
        // Use SRWLOCK from WinAPI.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif
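
/*
A minimal sketch of a custom replacement (illustrative only); this one merely
wraps std::atomic to show the required shape:

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
            { return m_Value.compare_exchange_weak(expected, desired); }
    private:
        std::atomic<uint32_t> m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicUint32
*/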

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    /*
    Every allocation will have its own memory block.
    Define to 1 for debugging purposes only.
    */
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    /*
    Minimum alignment of all allocations, in bytes.
    Set to more than 1 for debugging purposes only. Must be power of two.
    */
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /*
    Minimum margin before and after every allocation, in bytes.
    Set nonzero for debugging purposes only.
    */
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    /*
    Define this macro to 1 to automatically fill new allocations and destroyed
    allocations with some bit pattern.
    */
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    /*
    Define this macro to 1 together with a non-zero VMA_DEBUG_MARGIN to enable
    writing a magic value to the margin before and after every allocation and
    validating it, so that memory corruptions (out-of-bounds writes) are detected.
    */
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /*
    Set this to 1 for debugging purposes only, to enable a single mutex protecting
    all entry calls to the library. Can be useful for debugging multithreading issues.
    */
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /*
    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    Set to more than 1 for debugging purposes only. Must be power of two.
    */
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned iterator points to the found element, if it is present in the collection,
or to the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
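
// Example (illustrative, not part of the original): for data = {1, 3, 5} and
// cmp = operator<, VmaBinaryFindFirstNotLess(data, data + 3, 4, cmp) returns
// data + 2, the position of 5, which is also where 4 would be inserted.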

/*
Returns true if all pointers in the array are not-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
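
/*
Usage sketch (illustrative, not part of the original): the allocator plugs into
STL-style containers and forwards to the VkAllocationCallbacks when provided,
e.g. when VMA_USE_STL_VECTOR is enabled:

    VmaStlAllocator<int> alloc(pAllocationCallbacks); // callbacks may be null
    std::vector<int, VmaStlAllocator<int>> v(alloc);
    v.push_back(42);
*/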

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear: returning all items to m_ItemAllocator
    // as free would be unnecessary computation, since the allocator's memory is
    // released anyway when it is destroyed.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

4598 template<typename T, typename AllocatorT>
4599 class VmaList
4600 {
4601  VMA_CLASS_NO_COPY(VmaList)
4602 public:
4603  class iterator
4604  {
4605  public:
4606  iterator() :
4607  m_pList(VMA_NULL),
4608  m_pItem(VMA_NULL)
4609  {
4610  }
4611 
4612  T& operator*() const
4613  {
4614  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4615  return m_pItem->Value;
4616  }
4617  T* operator->() const
4618  {
4619  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4620  return &m_pItem->Value;
4621  }
4622 
4623  iterator& operator++()
4624  {
4625  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4626  m_pItem = m_pItem->pNext;
4627  return *this;
4628  }
4629  iterator& operator--()
4630  {
4631  if(m_pItem != VMA_NULL)
4632  {
4633  m_pItem = m_pItem->pPrev;
4634  }
4635  else
4636  {
4637  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4638  m_pItem = m_pList->Back();
4639  }
4640  return *this;
4641  }
4642 
4643  iterator operator++(int)
4644  {
4645  iterator result = *this;
4646  ++*this;
4647  return result;
4648  }
4649  iterator operator--(int)
4650  {
4651  iterator result = *this;
4652  --*this;
4653  return result;
4654  }
4655 
4656  bool operator==(const iterator& rhs) const
4657  {
4658  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4659  return m_pItem == rhs.m_pItem;
4660  }
4661  bool operator!=(const iterator& rhs) const
4662  {
4663  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4664  return m_pItem != rhs.m_pItem;
4665  }
4666 
4667  private:
4668  VmaRawList<T>* m_pList;
4669  VmaListItem<T>* m_pItem;
4670 
4671  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4672  m_pList(pList),
4673  m_pItem(pItem)
4674  {
4675  }
4676 
4677  friend class VmaList<T, AllocatorT>;
4678  };
4679 
4680  class const_iterator
4681  {
4682  public:
4683  const_iterator() :
4684  m_pList(VMA_NULL),
4685  m_pItem(VMA_NULL)
4686  {
4687  }
4688 
4689  const_iterator(const iterator& src) :
4690  m_pList(src.m_pList),
4691  m_pItem(src.m_pItem)
4692  {
4693  }
4694 
4695  const T& operator*() const
4696  {
4697  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4698  return m_pItem->Value;
4699  }
4700  const T* operator->() const
4701  {
4702  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4703  return &m_pItem->Value;
4704  }
4705 
4706  const_iterator& operator++()
4707  {
4708  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4709  m_pItem = m_pItem->pNext;
4710  return *this;
4711  }
4712  const_iterator& operator--()
4713  {
4714  if(m_pItem != VMA_NULL)
4715  {
4716  m_pItem = m_pItem->pPrev;
4717  }
4718  else
4719  {
4720  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4721  m_pItem = m_pList->Back();
4722  }
4723  return *this;
4724  }
4725 
4726  const_iterator operator++(int)
4727  {
4728  const_iterator result = *this;
4729  ++*this;
4730  return result;
4731  }
4732  const_iterator operator--(int)
4733  {
4734  const_iterator result = *this;
4735  --*this;
4736  return result;
4737  }
4738 
4739  bool operator==(const const_iterator& rhs) const
4740  {
4741  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4742  return m_pItem == rhs.m_pItem;
4743  }
4744  bool operator!=(const const_iterator& rhs) const
4745  {
4746  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4747  return m_pItem != rhs.m_pItem;
4748  }
4749 
4750  private:
4751  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4752  m_pList(pList),
4753  m_pItem(pItem)
4754  {
4755  }
4756 
4757  const VmaRawList<T>* m_pList;
4758  const VmaListItem<T>* m_pItem;
4759 
4760  friend class VmaList<T, AllocatorT>;
4761  };
4762 
4763  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4764 
4765  bool empty() const { return m_RawList.IsEmpty(); }
4766  size_t size() const { return m_RawList.GetCount(); }
4767 
4768  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4769  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4770 
4771  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4772  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4773 
4774  void clear() { m_RawList.Clear(); }
4775  void push_back(const T& value) { m_RawList.PushBack(value); }
4776  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4777  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4778 
4779 private:
4780  VmaRawList<T> m_RawList;
4781 };
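// A minimal usage sketch (assuming a VkAllocationCallbacks* named pCallbacks and
// that VmaStlAllocator is constructible from it, as used elsewhere in this file):
//
//   VmaStlAllocator<int> alloc(pCallbacks);
//   VmaList< int, VmaStlAllocator<int> > list(alloc);
//   list.push_back(1);
//   list.push_back(2);
//   for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin();
//       it != list.end(); ++it)
//   {
//       // *it visits 1, then 2.
//   }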
4782 
4783 #endif // #if VMA_USE_STL_LIST
4784 
4785 ////////////////////////////////////////////////////////////////////////////////
4786 // class VmaMap
4787 
4788 // Unused in this version.
4789 #if 0
4790 
4791 #if VMA_USE_STL_UNORDERED_MAP
4792 
4793 #define VmaPair std::pair
4794 
4795 #define VMA_MAP_TYPE(KeyT, ValueT) \
4796  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4797 
4798 #else // #if VMA_USE_STL_UNORDERED_MAP
4799 
4800 template<typename T1, typename T2>
4801 struct VmaPair
4802 {
4803  T1 first;
4804  T2 second;
4805 
4806  VmaPair() : first(), second() { }
4807  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4808 };
4809 
4810 /* Class compatible with a subset of the interface of std::unordered_map.
4811 KeyT, ValueT must be POD because they will be stored in VmaVector.
4812 */
4813 template<typename KeyT, typename ValueT>
4814 class VmaMap
4815 {
4816 public:
4817  typedef VmaPair<KeyT, ValueT> PairType;
4818  typedef PairType* iterator;
4819 
4820  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4821 
4822  iterator begin() { return m_Vector.begin(); }
4823  iterator end() { return m_Vector.end(); }
4824 
4825  void insert(const PairType& pair);
4826  iterator find(const KeyT& key);
4827  void erase(iterator it);
4828 
4829 private:
4830  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4831 };
4832 
4833 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4834 
4835 template<typename FirstT, typename SecondT>
4836 struct VmaPairFirstLess
4837 {
4838  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4839  {
4840  return lhs.first < rhs.first;
4841  }
4842  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4843  {
4844  return lhs.first < rhsFirst;
4845  }
4846 };
4847 
4848 template<typename KeyT, typename ValueT>
4849 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4850 {
4851  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4852  m_Vector.data(),
4853  m_Vector.data() + m_Vector.size(),
4854  pair,
4855  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4856  VmaVectorInsert(m_Vector, indexToInsert, pair);
4857 }
4858 
4859 template<typename KeyT, typename ValueT>
4860 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4861 {
4862  PairType* it = VmaBinaryFindFirstNotLess(
4863  m_Vector.data(),
4864  m_Vector.data() + m_Vector.size(),
4865  key,
4866  VmaPairFirstLess<KeyT, ValueT>());
4867  if((it != m_Vector.end()) && (it->first == key))
4868  {
4869  return it;
4870  }
4871  else
4872  {
4873  return m_Vector.end();
4874  }
4875 }
4876 
4877 template<typename KeyT, typename ValueT>
4878 void VmaMap<KeyT, ValueT>::erase(iterator it)
4879 {
4880  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4881 }
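// Design note: insert() keeps m_Vector sorted by key via binary search
// (VmaBinaryFindFirstNotLess), so find() runs in O(log n), while insert() and
// erase() pay O(n) for shifting vector elements - a trade-off that favors
// small maps with frequent lookups.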
4882 
4883 #endif // #if VMA_USE_STL_UNORDERED_MAP
4884 
4885 #endif // #if 0
4886 
4887 ////////////////////////////////////////////////////////////////////////////////
4888 
4889 class VmaDeviceMemoryBlock;
4890 
4891 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4892 
4893 struct VmaAllocation_T
4894 {
4895  VMA_CLASS_NO_COPY(VmaAllocation_T)
4896 private:
4897  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4898 
4899  enum FLAGS
4900  {
4901  FLAG_USER_DATA_STRING = 0x01,
4902  };
4903 
4904 public:
4905  enum ALLOCATION_TYPE
4906  {
4907  ALLOCATION_TYPE_NONE,
4908  ALLOCATION_TYPE_BLOCK,
4909  ALLOCATION_TYPE_DEDICATED,
4910  };
4911 
4912  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4913  m_Alignment(1),
4914  m_Size(0),
4915  m_pUserData(VMA_NULL),
4916  m_LastUseFrameIndex(currentFrameIndex),
4917  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4918  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4919  m_MapCount(0),
4920  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4921  {
4922 #if VMA_STATS_STRING_ENABLED
4923  m_CreationFrameIndex = currentFrameIndex;
4924  m_BufferImageUsage = 0;
4925 #endif
4926  }
4927 
4928  ~VmaAllocation_T()
4929  {
4930  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4931 
4932  // Check if owned string was freed.
4933  VMA_ASSERT(m_pUserData == VMA_NULL);
4934  }
4935 
4936  void InitBlockAllocation(
4937  VmaPool hPool,
4938  VmaDeviceMemoryBlock* block,
4939  VkDeviceSize offset,
4940  VkDeviceSize alignment,
4941  VkDeviceSize size,
4942  VmaSuballocationType suballocationType,
4943  bool mapped,
4944  bool canBecomeLost)
4945  {
4946  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4947  VMA_ASSERT(block != VMA_NULL);
4948  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4949  m_Alignment = alignment;
4950  m_Size = size;
4951  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4952  m_SuballocationType = (uint8_t)suballocationType;
4953  m_BlockAllocation.m_hPool = hPool;
4954  m_BlockAllocation.m_Block = block;
4955  m_BlockAllocation.m_Offset = offset;
4956  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4957  }
4958 
4959  void InitLost()
4960  {
4961  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4962  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4963  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4964  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4965  m_BlockAllocation.m_Block = VMA_NULL;
4966  m_BlockAllocation.m_Offset = 0;
4967  m_BlockAllocation.m_CanBecomeLost = true;
4968  }
4969 
4970  void ChangeBlockAllocation(
4971  VmaAllocator hAllocator,
4972  VmaDeviceMemoryBlock* block,
4973  VkDeviceSize offset);
4974 
4975  void ChangeSize(VkDeviceSize newSize);
4976  void ChangeOffset(VkDeviceSize newOffset);
4977 
4978  // Non-null pMappedData means the allocation is created with the MAPPED flag.
4979  void InitDedicatedAllocation(
4980  uint32_t memoryTypeIndex,
4981  VkDeviceMemory hMemory,
4982  VmaSuballocationType suballocationType,
4983  void* pMappedData,
4984  VkDeviceSize size)
4985  {
4986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4987  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4988  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4989  m_Alignment = 0;
4990  m_Size = size;
4991  m_SuballocationType = (uint8_t)suballocationType;
4992  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4993  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4994  m_DedicatedAllocation.m_hMemory = hMemory;
4995  m_DedicatedAllocation.m_pMappedData = pMappedData;
4996  }
4997 
4998  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4999  VkDeviceSize GetAlignment() const { return m_Alignment; }
5000  VkDeviceSize GetSize() const { return m_Size; }
5001  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5002  void* GetUserData() const { return m_pUserData; }
5003  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5004  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5005 
5006  VmaDeviceMemoryBlock* GetBlock() const
5007  {
5008  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5009  return m_BlockAllocation.m_Block;
5010  }
5011  VkDeviceSize GetOffset() const;
5012  VkDeviceMemory GetMemory() const;
5013  uint32_t GetMemoryTypeIndex() const;
5014  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5015  void* GetMappedData() const;
5016  bool CanBecomeLost() const;
5017  VmaPool GetPool() const;
5018 
5019  uint32_t GetLastUseFrameIndex() const
5020  {
5021  return m_LastUseFrameIndex.load();
5022  }
5023  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5024  {
5025  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5026  }
5027  /*
5028  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5029  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5030  - Else, returns false.
5031 
5032  If hAllocation is already lost, assert - you should not call it then.
5033  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5034  */
5035  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
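  // Example: with frameInUseCount = 2 and currentFrameIndex = 10, an allocation
  // whose LastUseFrameIndex is 7 can be made lost (7 + 2 < 10), while one last
  // used in frame 8 cannot (8 + 2 == 10).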
5036 
5037  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5038  {
5039  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5040  outInfo.blockCount = 1;
5041  outInfo.allocationCount = 1;
5042  outInfo.unusedRangeCount = 0;
5043  outInfo.usedBytes = m_Size;
5044  outInfo.unusedBytes = 0;
5045  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5046  outInfo.unusedRangeSizeMin = UINT64_MAX;
5047  outInfo.unusedRangeSizeMax = 0;
5048  }
5049 
5050  void BlockAllocMap();
5051  void BlockAllocUnmap();
5052  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5053  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5054 
5055 #if VMA_STATS_STRING_ENABLED
5056  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5057  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5058 
5059  void InitBufferImageUsage(uint32_t bufferImageUsage)
5060  {
5061  VMA_ASSERT(m_BufferImageUsage == 0);
5062  m_BufferImageUsage = bufferImageUsage;
5063  }
5064 
5065  void PrintParameters(class VmaJsonWriter& json) const;
5066 #endif
5067 
5068 private:
5069  VkDeviceSize m_Alignment;
5070  VkDeviceSize m_Size;
5071  void* m_pUserData;
5072  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5073  uint8_t m_Type; // ALLOCATION_TYPE
5074  uint8_t m_SuballocationType; // VmaSuballocationType
5075  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5076  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
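  // Example: an allocation created with VMA_ALLOCATION_CREATE_MAPPED_BIT and then
  // mapped twice more via vmaMapMemory() holds m_MapCount == 0x82 (0x80 | 2).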
5077  uint8_t m_MapCount;
5078  uint8_t m_Flags; // enum FLAGS
5079 
5080  // Allocation out of VmaDeviceMemoryBlock.
5081  struct BlockAllocation
5082  {
5083  VmaPool m_hPool; // Null if belongs to general memory.
5084  VmaDeviceMemoryBlock* m_Block;
5085  VkDeviceSize m_Offset;
5086  bool m_CanBecomeLost;
5087  };
5088 
5089  // Allocation for an object that has its own private VkDeviceMemory.
5090  struct DedicatedAllocation
5091  {
5092  uint32_t m_MemoryTypeIndex;
5093  VkDeviceMemory m_hMemory;
5094  void* m_pMappedData; // Not null means memory is mapped.
5095  };
5096 
5097  union
5098  {
5099  // Allocation out of VmaDeviceMemoryBlock.
5100  BlockAllocation m_BlockAllocation;
5101  // Allocation for an object that has its own private VkDeviceMemory.
5102  DedicatedAllocation m_DedicatedAllocation;
5103  };
5104 
5105 #if VMA_STATS_STRING_ENABLED
5106  uint32_t m_CreationFrameIndex;
5107  uint32_t m_BufferImageUsage; // 0 if unknown.
5108 #endif
5109 
5110  void FreeUserDataString(VmaAllocator hAllocator);
5111 };
5112 
5113 /*
5114 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
5115 allocation and returned as an allocated memory block, or is free.
5116 */
5117 struct VmaSuballocation
5118 {
5119  VkDeviceSize offset;
5120  VkDeviceSize size;
5121  VmaAllocation hAllocation;
5122  VmaSuballocationType type;
5123 };
5124 
5125 // Comparator for offsets.
5126 struct VmaSuballocationOffsetLess
5127 {
5128  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5129  {
5130  return lhs.offset < rhs.offset;
5131  }
5132 };
5133 struct VmaSuballocationOffsetGreater
5134 {
5135  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5136  {
5137  return lhs.offset > rhs.offset;
5138  }
5139 };
5140 
5141 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5142 
5143 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
5144 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5145 
5146 /*
5147 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5148 
5149 If canMakeOtherLost was false:
5150 - item points to a FREE suballocation.
5151 - itemsToMakeLostCount is 0.
5152 
5153 If canMakeOtherLost was true:
5154 - item points to the first of a sequence of suballocations, each of which is
5155  either FREE or points to a VmaAllocation that can become lost.
5156 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5157  the requested allocation to succeed.
5158 */
5159 struct VmaAllocationRequest
5160 {
5161  VkDeviceSize offset;
5162  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5163  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5164  VmaSuballocationList::iterator item;
5165  size_t itemsToMakeLostCount;
5166  void* customData;
5167 
5168  VkDeviceSize CalcCost() const
5169  {
5170  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5171  }
5172 };
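// Example: a request that must make 2 allocations lost, overlapping 512 KiB of
// their data (sumItemSize = 524288, itemsToMakeLostCount = 2), has
// CalcCost() = 524288 + 2 * 1048576 = 2621440 bytes-equivalent.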
5173 
5174 /*
5175 Data structure used for bookkeeping of allocations and unused ranges of memory
5176 in a single VkDeviceMemory block.
5177 */
5178 class VmaBlockMetadata
5179 {
5180 public:
5181  VmaBlockMetadata(VmaAllocator hAllocator);
5182  virtual ~VmaBlockMetadata() { }
5183  virtual void Init(VkDeviceSize size) { m_Size = size; }
5184 
5185  // Validates all data structures inside this object. If not valid, returns false.
5186  virtual bool Validate() const = 0;
5187  VkDeviceSize GetSize() const { return m_Size; }
5188  virtual size_t GetAllocationCount() const = 0;
5189  virtual VkDeviceSize GetSumFreeSize() const = 0;
5190  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5191  // Returns true if this block is empty - contains only a single free suballocation.
5192  virtual bool IsEmpty() const = 0;
5193 
5194  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5195  // Shouldn't modify blockCount.
5196  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5197 
5198 #if VMA_STATS_STRING_ENABLED
5199  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5200 #endif
5201 
5202  // Tries to find a place for suballocation with given parameters inside this block.
5203  // If succeeded, fills pAllocationRequest and returns true.
5204  // If failed, returns false.
5205  virtual bool CreateAllocationRequest(
5206  uint32_t currentFrameIndex,
5207  uint32_t frameInUseCount,
5208  VkDeviceSize bufferImageGranularity,
5209  VkDeviceSize allocSize,
5210  VkDeviceSize allocAlignment,
5211  bool upperAddress,
5212  VmaSuballocationType allocType,
5213  bool canMakeOtherLost,
5214  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5215  uint32_t strategy,
5216  VmaAllocationRequest* pAllocationRequest) = 0;
5217 
5218  virtual bool MakeRequestedAllocationsLost(
5219  uint32_t currentFrameIndex,
5220  uint32_t frameInUseCount,
5221  VmaAllocationRequest* pAllocationRequest) = 0;
5222 
5223  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5224 
5225  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5226 
5227  // Makes actual allocation based on request. Request must already be checked and valid.
5228  virtual void Alloc(
5229  const VmaAllocationRequest& request,
5230  VmaSuballocationType type,
5231  VkDeviceSize allocSize,
5232  bool upperAddress,
5233  VmaAllocation hAllocation) = 0;
5234 
5235  // Frees suballocation assigned to given memory region.
5236  virtual void Free(const VmaAllocation allocation) = 0;
5237  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5238 
5239  // Tries to resize (grow or shrink) space for given allocation, in place.
5240  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5241 
5242 protected:
5243  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5244 
5245 #if VMA_STATS_STRING_ENABLED
5246  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5247  VkDeviceSize unusedBytes,
5248  size_t allocationCount,
5249  size_t unusedRangeCount) const;
5250  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5251  VkDeviceSize offset,
5252  VmaAllocation hAllocation) const;
5253  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5254  VkDeviceSize offset,
5255  VkDeviceSize size) const;
5256  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5257 #endif
5258 
5259 private:
5260  VkDeviceSize m_Size;
5261  const VkAllocationCallbacks* m_pAllocationCallbacks;
5262 };
5263 
5264 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5265  VMA_ASSERT(0 && "Validation failed: " #cond); \
5266  return false; \
5267  } } while(false)
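// Typical use inside a Validate() implementation (a hypothetical invariant):
//   VMA_VALIDATE(m_SumFreeSize <= GetSize());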
5268 
5269 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5270 {
5271  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5272 public:
5273  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5274  virtual ~VmaBlockMetadata_Generic();
5275  virtual void Init(VkDeviceSize size);
5276 
5277  virtual bool Validate() const;
5278  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5279  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5280  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5281  virtual bool IsEmpty() const;
5282 
5283  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5284  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5285 
5286 #if VMA_STATS_STRING_ENABLED
5287  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5288 #endif
5289 
5290  virtual bool CreateAllocationRequest(
5291  uint32_t currentFrameIndex,
5292  uint32_t frameInUseCount,
5293  VkDeviceSize bufferImageGranularity,
5294  VkDeviceSize allocSize,
5295  VkDeviceSize allocAlignment,
5296  bool upperAddress,
5297  VmaSuballocationType allocType,
5298  bool canMakeOtherLost,
5299  uint32_t strategy,
5300  VmaAllocationRequest* pAllocationRequest);
5301 
5302  virtual bool MakeRequestedAllocationsLost(
5303  uint32_t currentFrameIndex,
5304  uint32_t frameInUseCount,
5305  VmaAllocationRequest* pAllocationRequest);
5306 
5307  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5308 
5309  virtual VkResult CheckCorruption(const void* pBlockData);
5310 
5311  virtual void Alloc(
5312  const VmaAllocationRequest& request,
5313  VmaSuballocationType type,
5314  VkDeviceSize allocSize,
5315  bool upperAddress,
5316  VmaAllocation hAllocation);
5317 
5318  virtual void Free(const VmaAllocation allocation);
5319  virtual void FreeAtOffset(VkDeviceSize offset);
5320 
5321  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5322 
5323  ////////////////////////////////////////////////////////////////////////////////
5324  // For defragmentation
5325 
5326  bool IsBufferImageGranularityConflictPossible(
5327  VkDeviceSize bufferImageGranularity,
5328  VmaSuballocationType& inOutPrevSuballocType) const;
5329 
5330 private:
5331  friend class VmaDefragmentationAlgorithm_Generic;
5332  friend class VmaDefragmentationAlgorithm_Fast;
5333 
5334  uint32_t m_FreeCount;
5335  VkDeviceSize m_SumFreeSize;
5336  VmaSuballocationList m_Suballocations;
5337  // Suballocations that are free and have size greater than certain threshold.
5338  // Sorted by size, ascending.
5339  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5340 
5341  bool ValidateFreeSuballocationList() const;
5342 
5343  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
5344  // If yes, fills pOffset and returns true. If no, returns false.
5345  bool CheckAllocation(
5346  uint32_t currentFrameIndex,
5347  uint32_t frameInUseCount,
5348  VkDeviceSize bufferImageGranularity,
5349  VkDeviceSize allocSize,
5350  VkDeviceSize allocAlignment,
5351  VmaSuballocationType allocType,
5352  VmaSuballocationList::const_iterator suballocItem,
5353  bool canMakeOtherLost,
5354  VkDeviceSize* pOffset,
5355  size_t* itemsToMakeLostCount,
5356  VkDeviceSize* pSumFreeSize,
5357  VkDeviceSize* pSumItemSize) const;
5358  // Given a free suballocation, merges it with the following one, which must also be free.
5359  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5360  // Releases given suballocation, making it free.
5361  // Merges it with adjacent free suballocations if applicable.
5362  // Returns iterator to new free suballocation at this place.
5363  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5364  // Given a free suballocation, inserts it into the sorted list
5365  // m_FreeSuballocationsBySize if it's suitable.
5366  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5367  // Given a free suballocation, removes it from the sorted list
5368  // m_FreeSuballocationsBySize if it's suitable.
5369  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5370 };
5371 
5372 /*
5373 Allocations and their references in internal data structure look like this:
5374 
5375 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5376 
5377  0 +-------+
5378  | |
5379  | |
5380  | |
5381  +-------+
5382  | Alloc | 1st[m_1stNullItemsBeginCount]
5383  +-------+
5384  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5385  +-------+
5386  | ... |
5387  +-------+
5388  | Alloc | 1st[1st.size() - 1]
5389  +-------+
5390  | |
5391  | |
5392  | |
5393 GetSize() +-------+
5394 
5395 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5396 
5397  0 +-------+
5398  | Alloc | 2nd[0]
5399  +-------+
5400  | Alloc | 2nd[1]
5401  +-------+
5402  | ... |
5403  +-------+
5404  | Alloc | 2nd[2nd.size() - 1]
5405  +-------+
5406  | |
5407  | |
5408  | |
5409  +-------+
5410  | Alloc | 1st[m_1stNullItemsBeginCount]
5411  +-------+
5412  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5413  +-------+
5414  | ... |
5415  +-------+
5416  | Alloc | 1st[1st.size() - 1]
5417  +-------+
5418  | |
5419 GetSize() +-------+
5420 
5421 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5422 
5423  0 +-------+
5424  | |
5425  | |
5426  | |
5427  +-------+
5428  | Alloc | 1st[m_1stNullItemsBeginCount]
5429  +-------+
5430  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5431  +-------+
5432  | ... |
5433  +-------+
5434  | Alloc | 1st[1st.size() - 1]
5435  +-------+
5436  | |
5437  | |
5438  | |
5439  +-------+
5440  | Alloc | 2nd[2nd.size() - 1]
5441  +-------+
5442  | ... |
5443  +-------+
5444  | Alloc | 2nd[1]
5445  +-------+
5446  | Alloc | 2nd[0]
5447 GetSize() +-------+
5448 
5449 */
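// Example (SECOND_VECTOR_RING_BUFFER): after allocating A, B, C and freeing A,
// 1st = [null, B, C] with m_1stNullItemsBeginCount = 1; a subsequent allocation D
// that no longer fits after C wraps around to offset 0 and is stored in 2nd = [D],
// reusing the space freed at the start of the block.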
5450 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5451 {
5452  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5453 public:
5454  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5455  virtual ~VmaBlockMetadata_Linear();
5456  virtual void Init(VkDeviceSize size);
5457 
5458  virtual bool Validate() const;
5459  virtual size_t GetAllocationCount() const;
5460  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5461  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5462  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5463 
5464  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5465  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5466 
5467 #if VMA_STATS_STRING_ENABLED
5468  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5469 #endif
5470 
5471  virtual bool CreateAllocationRequest(
5472  uint32_t currentFrameIndex,
5473  uint32_t frameInUseCount,
5474  VkDeviceSize bufferImageGranularity,
5475  VkDeviceSize allocSize,
5476  VkDeviceSize allocAlignment,
5477  bool upperAddress,
5478  VmaSuballocationType allocType,
5479  bool canMakeOtherLost,
5480  uint32_t strategy,
5481  VmaAllocationRequest* pAllocationRequest);
5482 
5483  virtual bool MakeRequestedAllocationsLost(
5484  uint32_t currentFrameIndex,
5485  uint32_t frameInUseCount,
5486  VmaAllocationRequest* pAllocationRequest);
5487 
5488  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5489 
5490  virtual VkResult CheckCorruption(const void* pBlockData);
5491 
5492  virtual void Alloc(
5493  const VmaAllocationRequest& request,
5494  VmaSuballocationType type,
5495  VkDeviceSize allocSize,
5496  bool upperAddress,
5497  VmaAllocation hAllocation);
5498 
5499  virtual void Free(const VmaAllocation allocation);
5500  virtual void FreeAtOffset(VkDeviceSize offset);
5501 
5502 private:
5503  /*
5504  There are two suballocation vectors, used in ping-pong way.
5505  The one with index m_1stVectorIndex is called 1st.
5506  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5507  2nd can be non-empty only when 1st is not empty.
5508  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5509  */
5510  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5511 
5512  enum SECOND_VECTOR_MODE
5513  {
5514  SECOND_VECTOR_EMPTY,
5515  /*
5516  Suballocations in 2nd vector are created later than the ones in 1st, but they
5517  all have smaller offset.
5518  */
5519  SECOND_VECTOR_RING_BUFFER,
5520  /*
5521  Suballocations in 2nd vector are upper side of double stack.
5522  They all have offsets higher than those in 1st vector.
5523  Top of this stack means smaller offsets, but higher indices in this vector.
5524  */
5525  SECOND_VECTOR_DOUBLE_STACK,
5526  };
5527 
5528  VkDeviceSize m_SumFreeSize;
5529  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5530  uint32_t m_1stVectorIndex;
5531  SECOND_VECTOR_MODE m_2ndVectorMode;
5532 
5533  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5534  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5535  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5536  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5537 
5538  // Number of items in 1st vector with hAllocation = null at the beginning.
5539  size_t m_1stNullItemsBeginCount;
5540  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5541  size_t m_1stNullItemsMiddleCount;
5542  // Number of items in 2nd vector with hAllocation = null.
5543  size_t m_2ndNullItemsCount;
5544 
5545  bool ShouldCompact1st() const;
5546  void CleanupAfterFree();
5547 };
5548 
5549 /*
5550 - GetSize() is the original size of allocated memory block.
5551 - m_UsableSize is this size aligned down to a power of two.
5552  All allocations and calculations happen relative to m_UsableSize.
5553 - GetUnusableSize() is the difference between them.
5554  It is reported as a separate, unused range, not available for allocations.
5555 
5556 Node at level 0 has size = m_UsableSize.
5557 Each next level contains nodes with size 2 times smaller than current level.
5558 m_LevelCount is the maximum number of levels to use in the current object.
5559 */
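// Worked example: Init(1000) gives m_UsableSize = 512 (1000 aligned down to a
// power of two) and GetUnusableSize() = 488. Node sizes per level are then
// LevelToNodeSize(0) = 512, LevelToNodeSize(1) = 256, ..., LevelToNodeSize(4) = 32,
// so m_LevelCount = 5 here, as nodes are not split below MIN_NODE_SIZE = 32.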
5560 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5561 {
5562  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5563 public:
5564  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5565  virtual ~VmaBlockMetadata_Buddy();
5566  virtual void Init(VkDeviceSize size);
5567 
5568  virtual bool Validate() const;
5569  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5570  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5571  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5572  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5573 
5574  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5575  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5576 
5577 #if VMA_STATS_STRING_ENABLED
5578  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5579 #endif
5580 
5581  virtual bool CreateAllocationRequest(
5582  uint32_t currentFrameIndex,
5583  uint32_t frameInUseCount,
5584  VkDeviceSize bufferImageGranularity,
5585  VkDeviceSize allocSize,
5586  VkDeviceSize allocAlignment,
5587  bool upperAddress,
5588  VmaSuballocationType allocType,
5589  bool canMakeOtherLost,
5590  uint32_t strategy,
5591  VmaAllocationRequest* pAllocationRequest);
5592 
5593  virtual bool MakeRequestedAllocationsLost(
5594  uint32_t currentFrameIndex,
5595  uint32_t frameInUseCount,
5596  VmaAllocationRequest* pAllocationRequest);
5597 
5598  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5599 
5600  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5601 
5602  virtual void Alloc(
5603  const VmaAllocationRequest& request,
5604  VmaSuballocationType type,
5605  VkDeviceSize allocSize,
5606  bool upperAddress,
5607  VmaAllocation hAllocation);
5608 
5609  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5610  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5611 
5612 private:
5613  static const VkDeviceSize MIN_NODE_SIZE = 32;
5614  static const size_t MAX_LEVELS = 30;
5615 
5616  struct ValidationContext
5617  {
5618  size_t calculatedAllocationCount;
5619  size_t calculatedFreeCount;
5620  VkDeviceSize calculatedSumFreeSize;
5621 
5622  ValidationContext() :
5623  calculatedAllocationCount(0),
5624  calculatedFreeCount(0),
5625  calculatedSumFreeSize(0) { }
5626  };
5627 
5628  struct Node
5629  {
5630  VkDeviceSize offset;
5631  enum TYPE
5632  {
5633  TYPE_FREE,
5634  TYPE_ALLOCATION,
5635  TYPE_SPLIT,
5636  TYPE_COUNT
5637  } type;
5638  Node* parent;
5639  Node* buddy;
5640 
5641  union
5642  {
5643  struct
5644  {
5645  Node* prev;
5646  Node* next;
5647  } free;
5648  struct
5649  {
5650  VmaAllocation alloc;
5651  } allocation;
5652  struct
5653  {
5654  Node* leftChild;
5655  } split;
5656  };
5657  };
5658 
5659  // Size of the memory block aligned down to a power of two.
5660  VkDeviceSize m_UsableSize;
5661  uint32_t m_LevelCount;
5662 
5663  Node* m_Root;
5664  struct {
5665  Node* front;
5666  Node* back;
5667  } m_FreeList[MAX_LEVELS];
5668  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5669  size_t m_AllocationCount;
5670  // Number of nodes in the tree with type == TYPE_FREE.
5671  size_t m_FreeCount;
5672  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5673  VkDeviceSize m_SumFreeSize;
5674 
5675  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5676  void DeleteNode(Node* node);
5677  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5678  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5679  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5680  // Alloc passed just for validation. Can be null.
5681  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5682  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5683  // Adds node to the front of FreeList at given level.
5684  // node->type must be FREE.
5685  // node->free.prev, next can be undefined.
5686  void AddToFreeListFront(uint32_t level, Node* node);
5687  // Removes node from FreeList at given level.
5688  // node->type must be FREE.
5689  // node->free.prev, next stay untouched.
5690  void RemoveFromFreeList(uint32_t level, Node* node);
5691 
5692 #if VMA_STATS_STRING_ENABLED
5693  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5694 #endif
5695 };
5696 
5697 /*
5698 Represents a single block of device memory (`VkDeviceMemory`) with all the
5699 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5700 
5701 Thread-safety: This class must be externally synchronized.
5702 */
5703 class VmaDeviceMemoryBlock
5704 {
5705  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5706 public:
5707  VmaBlockMetadata* m_pMetadata;
5708 
5709  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5710 
5711  ~VmaDeviceMemoryBlock()
5712  {
5713  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5714  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5715  }
5716 
5717  // Always call after construction.
5718  void Init(
5719  VmaAllocator hAllocator,
5720  uint32_t newMemoryTypeIndex,
5721  VkDeviceMemory newMemory,
5722  VkDeviceSize newSize,
5723  uint32_t id,
5724  uint32_t algorithm);
5725  // Always call before destruction.
5726  void Destroy(VmaAllocator allocator);
5727 
5728  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5729  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5730  uint32_t GetId() const { return m_Id; }
5731  void* GetMappedData() const { return m_pMappedData; }
5732 
5733  // Validates all data structures inside this object. If not valid, returns false.
5734  bool Validate() const;
5735 
5736  VkResult CheckCorruption(VmaAllocator hAllocator);
5737 
5738  // ppData can be null.
5739  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5740  void Unmap(VmaAllocator hAllocator, uint32_t count);
5741 
5742  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5743  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5744 
5745  VkResult BindBufferMemory(
5746  const VmaAllocator hAllocator,
5747  const VmaAllocation hAllocation,
5748  VkBuffer hBuffer);
5749  VkResult BindImageMemory(
5750  const VmaAllocator hAllocator,
5751  const VmaAllocation hAllocation,
5752  VkImage hImage);
5753 
5754 private:
5755  uint32_t m_MemoryTypeIndex;
5756  uint32_t m_Id;
5757  VkDeviceMemory m_hMemory;
5758 
5759  /*
5760  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5761  Also protects m_MapCount, m_pMappedData.
5762  Allocations, deallocations, and any changes in m_pMetadata are protected by parent's VmaBlockVector::m_Mutex.
5763  */
5764  VMA_MUTEX m_Mutex;
5765  uint32_t m_MapCount;
5766  void* m_pMappedData;
5767 };
5768 
5769 struct VmaPointerLess
5770 {
5771  bool operator()(const void* lhs, const void* rhs) const
5772  {
5773  return lhs < rhs;
5774  }
5775 };
5776 
5777 struct VmaDefragmentationMove
5778 {
5779  size_t srcBlockIndex;
5780  size_t dstBlockIndex;
5781  VkDeviceSize srcOffset;
5782  VkDeviceSize dstOffset;
5783  VkDeviceSize size;
5784 };
5785 
5786 class VmaDefragmentationAlgorithm;
5787 
5788 /*
5789 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5790 Vulkan memory type.
5791 
5792 Synchronized internally with a mutex.
5793 */
5794 struct VmaBlockVector
5795 {
5796  VMA_CLASS_NO_COPY(VmaBlockVector)
5797 public:
5798  VmaBlockVector(
5799  VmaAllocator hAllocator,
5800  uint32_t memoryTypeIndex,
5801  VkDeviceSize preferredBlockSize,
5802  size_t minBlockCount,
5803  size_t maxBlockCount,
5804  VkDeviceSize bufferImageGranularity,
5805  uint32_t frameInUseCount,
5806  bool isCustomPool,
5807  bool explicitBlockSize,
5808  uint32_t algorithm);
5809  ~VmaBlockVector();
5810 
5811  VkResult CreateMinBlocks();
5812 
5813  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5814  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5815  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5816  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5817  uint32_t GetAlgorithm() const { return m_Algorithm; }
5818 
5819  void GetPoolStats(VmaPoolStats* pStats);
5820 
5821  bool IsEmpty() const { return m_Blocks.empty(); }
5822  bool IsCorruptionDetectionEnabled() const;
5823 
5824  VkResult Allocate(
5825  VmaPool hCurrentPool,
5826  uint32_t currentFrameIndex,
5827  VkDeviceSize size,
5828  VkDeviceSize alignment,
5829  const VmaAllocationCreateInfo& createInfo,
5830  VmaSuballocationType suballocType,
5831  size_t allocationCount,
5832  VmaAllocation* pAllocations);
5833 
5834  void Free(
5835  VmaAllocation hAllocation);
5836 
5837  // Adds statistics of this BlockVector to pStats.
5838  void AddStats(VmaStats* pStats);
5839 
5840 #if VMA_STATS_STRING_ENABLED
5841  void PrintDetailedMap(class VmaJsonWriter& json);
5842 #endif
5843 
5844  void MakePoolAllocationsLost(
5845  uint32_t currentFrameIndex,
5846  size_t* pLostAllocationCount);
5847  VkResult CheckCorruption();
5848 
5849  // Saves results in pCtx->res.
5850  void Defragment(
5851  class VmaBlockVectorDefragmentationContext* pCtx,
5852  VmaDefragmentationStats* pStats,
5853  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5854  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5855  VkCommandBuffer commandBuffer);
5856  void DefragmentationEnd(
5857  class VmaBlockVectorDefragmentationContext* pCtx,
5858  VmaDefragmentationStats* pStats);
5859 
5860  ////////////////////////////////////////////////////////////////////////////////
5861  // To be used only while the m_Mutex is locked. Used during defragmentation.
5862 
5863  size_t GetBlockCount() const { return m_Blocks.size(); }
5864  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5865  size_t CalcAllocationCount() const;
5866  bool IsBufferImageGranularityConflictPossible() const;
5867 
5868 private:
5869  friend class VmaDefragmentationAlgorithm_Generic;
5870 
5871  const VmaAllocator m_hAllocator;
5872  const uint32_t m_MemoryTypeIndex;
5873  const VkDeviceSize m_PreferredBlockSize;
5874  const size_t m_MinBlockCount;
5875  const size_t m_MaxBlockCount;
5876  const VkDeviceSize m_BufferImageGranularity;
5877  const uint32_t m_FrameInUseCount;
5878  const bool m_IsCustomPool;
5879  const bool m_ExplicitBlockSize;
5880  const uint32_t m_Algorithm;
5881  /* There can be at most one block that is completely empty - a
5882  hysteresis to avoid the pessimistic case of alternating creation and destruction
5883  of a VkDeviceMemory. */
5884  bool m_HasEmptyBlock;
5885  VMA_RW_MUTEX m_Mutex;
5886  // Incrementally sorted by sumFreeSize, ascending.
5887  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5888  uint32_t m_NextBlockId;
5889 
5890  VkDeviceSize CalcMaxBlockSize() const;
5891 
5892  // Finds and removes given block from vector.
5893  void Remove(VmaDeviceMemoryBlock* pBlock);
5894 
5895  // Performs single step in sorting m_Blocks. They may not be fully sorted
5896  // after this call.
5897  void IncrementallySortBlocks();
5898 
5899  VkResult AllocatePage(
5900  VmaPool hCurrentPool,
5901  uint32_t currentFrameIndex,
5902  VkDeviceSize size,
5903  VkDeviceSize alignment,
5904  const VmaAllocationCreateInfo& createInfo,
5905  VmaSuballocationType suballocType,
5906  VmaAllocation* pAllocation);
5907 
5908  // To be used only without CAN_MAKE_OTHER_LOST flag.
5909  VkResult AllocateFromBlock(
5910  VmaDeviceMemoryBlock* pBlock,
5911  VmaPool hCurrentPool,
5912  uint32_t currentFrameIndex,
5913  VkDeviceSize size,
5914  VkDeviceSize alignment,
5915  VmaAllocationCreateFlags allocFlags,
5916  void* pUserData,
5917  VmaSuballocationType suballocType,
5918  uint32_t strategy,
5919  VmaAllocation* pAllocation);
5920 
5921  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5922 
5923  // Saves result to pCtx->res.
5924  void ApplyDefragmentationMovesCpu(
5925  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5926  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5927  // Saves result to pCtx->res.
5928  void ApplyDefragmentationMovesGpu(
5929  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5930  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5931  VkCommandBuffer commandBuffer);
5932 
5933  /*
5934  Used during defragmentation. pDefragmentationStats is optional. It's in/out
5935  - updated with new data.
5936  */
5937  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5938 };
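// A simplified sketch of the allocation flow: Allocate() loops over
// AllocatePage(), which first tries the existing m_Blocks (kept incrementally
// sorted by sumFreeSize) via AllocateFromBlock(); if no existing block can
// satisfy the request and m_MaxBlockCount permits, CreateBlock() allocates a
// new VkDeviceMemory of up to m_PreferredBlockSize and the request is retried there.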
5939 
5940 struct VmaPool_T
5941 {
5942  VMA_CLASS_NO_COPY(VmaPool_T)
5943 public:
5944  VmaBlockVector m_BlockVector;
5945 
5946  VmaPool_T(
5947  VmaAllocator hAllocator,
5948  const VmaPoolCreateInfo& createInfo,
5949  VkDeviceSize preferredBlockSize);
5950  ~VmaPool_T();
5951 
5952  uint32_t GetId() const { return m_Id; }
5953  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5954 
5955 #if VMA_STATS_STRING_ENABLED
5956  //void PrintDetailedMap(class VmaStringBuilder& sb);
5957 #endif
5958 
5959 private:
5960  uint32_t m_Id;
5961 };
5962 
5963 /*
5964 Performs defragmentation:
5965 
5966 - Updates `pBlockVector->m_pMetadata`.
5967 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5968 - Does not move actual data, only returns requested moves as `moves`.
5969 */
5970 class VmaDefragmentationAlgorithm
5971 {
5972  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5973 public:
5974  VmaDefragmentationAlgorithm(
5975  VmaAllocator hAllocator,
5976  VmaBlockVector* pBlockVector,
5977  uint32_t currentFrameIndex) :
5978  m_hAllocator(hAllocator),
5979  m_pBlockVector(pBlockVector),
5980  m_CurrentFrameIndex(currentFrameIndex)
5981  {
5982  }
5983  virtual ~VmaDefragmentationAlgorithm()
5984  {
5985  }
5986 
5987  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5988  virtual void AddAll() = 0;
5989 
5990  virtual VkResult Defragment(
5991  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5992  VkDeviceSize maxBytesToMove,
5993  uint32_t maxAllocationsToMove) = 0;
5994 
5995  virtual VkDeviceSize GetBytesMoved() const = 0;
5996  virtual uint32_t GetAllocationsMoved() const = 0;
5997 
5998 protected:
5999  VmaAllocator const m_hAllocator;
6000  VmaBlockVector* const m_pBlockVector;
6001  const uint32_t m_CurrentFrameIndex;
6002 
6003  struct AllocationInfo
6004  {
6005  VmaAllocation m_hAllocation;
6006  VkBool32* m_pChanged;
6007 
6008  AllocationInfo() :
6009  m_hAllocation(VK_NULL_HANDLE),
6010  m_pChanged(VMA_NULL)
6011  {
6012  }
6013  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6014  m_hAllocation(hAlloc),
6015  m_pChanged(pChanged)
6016  {
6017  }
6018  };
6019 };
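// A simplified sketch of how a concrete algorithm is driven by its owning
// VmaBlockVectorDefragmentationContext:
//
//   pAlgorithm->AddAll();                  // or AddAllocation() per allocation
//   VkResult res = pAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
//   // On success, `moves` lists src/dst block indices, offsets and sizes;
//   // the actual data copies happen later in ApplyDefragmentationMovesCpu/Gpu.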
6020 
6021 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6022 {
6023  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6024 public:
6025  VmaDefragmentationAlgorithm_Generic(
6026  VmaAllocator hAllocator,
6027  VmaBlockVector* pBlockVector,
6028  uint32_t currentFrameIndex,
6029  bool overlappingMoveSupported);
6030  virtual ~VmaDefragmentationAlgorithm_Generic();
6031 
6032  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6033  virtual void AddAll() { m_AllAllocations = true; }
6034 
6035  virtual VkResult Defragment(
6036  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6037  VkDeviceSize maxBytesToMove,
6038  uint32_t maxAllocationsToMove);
6039 
6040  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6041  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6042 
6043 private:
6044  uint32_t m_AllocationCount;
6045  bool m_AllAllocations;
6046 
6047  VkDeviceSize m_BytesMoved;
6048  uint32_t m_AllocationsMoved;
6049 
6050  struct AllocationInfoSizeGreater
6051  {
6052  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6053  {
6054  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6055  }
6056  };
6057 
6058  struct AllocationInfoOffsetGreater
6059  {
6060  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6061  {
6062  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6063  }
6064  };
6065 
6066  struct BlockInfo
6067  {
6068  size_t m_OriginalBlockIndex;
6069  VmaDeviceMemoryBlock* m_pBlock;
6070  bool m_HasNonMovableAllocations;
6071  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6072 
6073  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6074  m_OriginalBlockIndex(SIZE_MAX),
6075  m_pBlock(VMA_NULL),
6076  m_HasNonMovableAllocations(true),
6077  m_Allocations(pAllocationCallbacks)
6078  {
6079  }
6080 
6081  void CalcHasNonMovableAllocations()
6082  {
6083  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6084  const size_t defragmentAllocCount = m_Allocations.size();
6085  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6086  }
6087 
6088  void SortAllocationsBySizeDescending()
6089  {
6090  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6091  }
6092 
6093  void SortAllocationsByOffsetDescending()
6094  {
6095  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6096  }
6097  };
6098 
6099  struct BlockPointerLess
6100  {
6101  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6102  {
6103  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6104  }
6105  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6106  {
6107  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6108  }
6109  };
6110 
6111  // 1. Blocks with some non-movable allocations go first.
6112  // 2. Blocks with smaller sumFreeSize go first.
6113  struct BlockInfoCompareMoveDestination
6114  {
6115  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6116  {
6117  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6118  {
6119  return true;
6120  }
6121  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6122  {
6123  return false;
6124  }
6125  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6126  {
6127  return true;
6128  }
6129  return false;
6130  }
6131  };
6132 
6133  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6134  BlockInfoVector m_Blocks;
6135 
6136  VkResult DefragmentRound(
6137  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6138  VkDeviceSize maxBytesToMove,
6139  uint32_t maxAllocationsToMove);
6140 
6141  size_t CalcBlocksWithNonMovableCount() const;
6142 
6143  static bool MoveMakesSense(
6144  size_t dstBlockIndex, VkDeviceSize dstOffset,
6145  size_t srcBlockIndex, VkDeviceSize srcOffset);
6146 };
6147 
6148 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6149 {
6150  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6151 public:
6152  VmaDefragmentationAlgorithm_Fast(
6153  VmaAllocator hAllocator,
6154  VmaBlockVector* pBlockVector,
6155  uint32_t currentFrameIndex,
6156  bool overlappingMoveSupported);
6157  virtual ~VmaDefragmentationAlgorithm_Fast();
6158 
6159  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6160  virtual void AddAll() { m_AllAllocations = true; }
6161 
6162  virtual VkResult Defragment(
6163  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6164  VkDeviceSize maxBytesToMove,
6165  uint32_t maxAllocationsToMove);
6166 
6167  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6168  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6169 
6170 private:
6171  struct BlockInfo
6172  {
6173  size_t origBlockIndex;
6174  };
6175 
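 // Fixed-capacity cache of free regions found in destination blocks.
 // Register() keeps at most MAX_COUNT entries, replacing the smallest entry
 // (only if it is smaller than the incoming region) when no slot is free.
 // Fetch() returns the candidate that leaves the most space after the aligned
 // allocation, then shrinks or invalidates the chosen entry.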
6176  class FreeSpaceDatabase
6177  {
6178  public:
6179  FreeSpaceDatabase()
6180  {
6181  FreeSpace s = {};
6182  s.blockInfoIndex = SIZE_MAX;
6183  for(size_t i = 0; i < MAX_COUNT; ++i)
6184  {
6185  m_FreeSpaces[i] = s;
6186  }
6187  }
6188 
6189  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6190  {
6191  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6192  {
6193  return;
6194  }
6195 
6196  // Find the first invalid entry or, failing that, the smallest entry smaller than the new space.
6197  size_t bestIndex = SIZE_MAX;
6198  for(size_t i = 0; i < MAX_COUNT; ++i)
6199  {
6200  // Empty structure.
6201  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6202  {
6203  bestIndex = i;
6204  break;
6205  }
6206  if(m_FreeSpaces[i].size < size &&
6207  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6208  {
6209  bestIndex = i;
6210  }
6211  }
6212 
6213  if(bestIndex != SIZE_MAX)
6214  {
6215  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6216  m_FreeSpaces[bestIndex].offset = offset;
6217  m_FreeSpaces[bestIndex].size = size;
6218  }
6219  }
6220 
6221  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6222  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6223  {
6224  size_t bestIndex = SIZE_MAX;
6225  VkDeviceSize bestFreeSpaceAfter = 0;
6226  for(size_t i = 0; i < MAX_COUNT; ++i)
6227  {
6228  // Structure is valid.
6229  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6230  {
6231  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6232  // Allocation fits into this structure.
6233  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6234  {
6235  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6236  (dstOffset + size);
6237  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6238  {
6239  bestIndex = i;
6240  bestFreeSpaceAfter = freeSpaceAfter;
6241  }
6242  }
6243  }
6244  }
6245 
6246  if(bestIndex != SIZE_MAX)
6247  {
6248  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6249  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6250 
6251  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6252  {
6253  // Leave this structure for remaining empty space.
6254  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6255  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6256  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6257  }
6258  else
6259  {
6260  // This structure becomes invalid.
6261  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6262  }
6263 
6264  return true;
6265  }
6266 
6267  return false;
6268  }
6269 
6270  private:
6271  static const size_t MAX_COUNT = 4;
6272 
6273  struct FreeSpace
6274  {
6275  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6276  VkDeviceSize offset;
6277  VkDeviceSize size;
6278  } m_FreeSpaces[MAX_COUNT];
6279  };
6280 
6281  const bool m_OverlappingMoveSupported;
6282 
6283  uint32_t m_AllocationCount;
6284  bool m_AllAllocations;
6285 
6286  VkDeviceSize m_BytesMoved;
6287  uint32_t m_AllocationsMoved;
6288 
6289  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6290 
6291  void PreprocessMetadata();
6292  void PostprocessMetadata();
6293  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6294 };
6295 
6296 struct VmaBlockDefragmentationContext
6297 {
6298  enum BLOCK_FLAG
6299  {
6300  BLOCK_FLAG_USED = 0x00000001,
6301  };
6302  uint32_t flags;
6303  VkBuffer hBuffer;
6304 
6305  VmaBlockDefragmentationContext() :
6306  flags(0),
6307  hBuffer(VK_NULL_HANDLE)
6308  {
6309  }
6310 };
6311 
6312 class VmaBlockVectorDefragmentationContext
6313 {
6314  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6315 public:
6316  VkResult res;
6317  bool mutexLocked;
6318  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6319 
6320  VmaBlockVectorDefragmentationContext(
6321  VmaAllocator hAllocator,
6322  VmaPool hCustomPool, // Optional.
6323  VmaBlockVector* pBlockVector,
6324  uint32_t currFrameIndex,
6325  uint32_t flags);
6326  ~VmaBlockVectorDefragmentationContext();
6327 
6328  VmaPool GetCustomPool() const { return m_hCustomPool; }
6329  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6330  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6331 
6332  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6333  void AddAll() { m_AllAllocations = true; }
6334 
6335  void Begin(bool overlappingMoveSupported);
6336 
6337 private:
6338  const VmaAllocator m_hAllocator;
6339  // Null if not from custom pool.
6340  const VmaPool m_hCustomPool;
6341  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6342  VmaBlockVector* const m_pBlockVector;
6343  const uint32_t m_CurrFrameIndex;
6344  const uint32_t m_AlgorithmFlags;
6345  // Owner of this object.
6346  VmaDefragmentationAlgorithm* m_pAlgorithm;
6347 
6348  struct AllocInfo
6349  {
6350  VmaAllocation hAlloc;
6351  VkBool32* pChanged;
6352  };
6353  // Used between constructor and Begin.
6354  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6355  bool m_AllAllocations;
6356 };
6357 
6358 struct VmaDefragmentationContext_T
6359 {
6360 private:
6361  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6362 public:
6363  VmaDefragmentationContext_T(
6364  VmaAllocator hAllocator,
6365  uint32_t currFrameIndex,
6366  uint32_t flags,
6367  VmaDefragmentationStats* pStats);
6368  ~VmaDefragmentationContext_T();
6369 
6370  void AddPools(uint32_t poolCount, VmaPool* pPools);
6371  void AddAllocations(
6372  uint32_t allocationCount,
6373  VmaAllocation* pAllocations,
6374  VkBool32* pAllocationsChanged);
6375 
6376  /*
6377  Returns:
6378  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
6379  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6380  - Negative value if an error occurred and the object can be destroyed immediately.
6381  */
6382  VkResult Defragment(
6383  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6384  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6385  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
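  // Expected handling by the caller (a sketch):
  //   VkResult res = pCtx->Defragment(...);
  //   if(res == VK_NOT_READY) { /* keep pCtx alive until vmaDefragmentationEnd() */ }
  //   else { /* VK_SUCCESS or error: pCtx can be destroyed immediately */ }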
6386 
6387 private:
6388  const VmaAllocator m_hAllocator;
6389  const uint32_t m_CurrFrameIndex;
6390  const uint32_t m_Flags;
6391  VmaDefragmentationStats* const m_pStats;
6392  // Owner of these objects.
6393  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6394  // Owner of these objects.
6395  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6396 };
6397 
6398 #if VMA_RECORDING_ENABLED
6399 
6400 class VmaRecorder
6401 {
6402 public:
6403  VmaRecorder();
6404  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6405  void WriteConfiguration(
6406  const VkPhysicalDeviceProperties& devProps,
6407  const VkPhysicalDeviceMemoryProperties& memProps,
6408  bool dedicatedAllocationExtensionEnabled);
6409  ~VmaRecorder();
6410 
6411  void RecordCreateAllocator(uint32_t frameIndex);
6412  void RecordDestroyAllocator(uint32_t frameIndex);
6413  void RecordCreatePool(uint32_t frameIndex,
6414  const VmaPoolCreateInfo& createInfo,
6415  VmaPool pool);
6416  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6417  void RecordAllocateMemory(uint32_t frameIndex,
6418  const VkMemoryRequirements& vkMemReq,
6419  const VmaAllocationCreateInfo& createInfo,
6420  VmaAllocation allocation);
6421  void RecordAllocateMemoryPages(uint32_t frameIndex,
6422  const VkMemoryRequirements& vkMemReq,
6423  const VmaAllocationCreateInfo& createInfo,
6424  uint64_t allocationCount,
6425  const VmaAllocation* pAllocations);
6426  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6427  const VkMemoryRequirements& vkMemReq,
6428  bool requiresDedicatedAllocation,
6429  bool prefersDedicatedAllocation,
6430  const VmaAllocationCreateInfo& createInfo,
6431  VmaAllocation allocation);
6432  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6433  const VkMemoryRequirements& vkMemReq,
6434  bool requiresDedicatedAllocation,
6435  bool prefersDedicatedAllocation,
6436  const VmaAllocationCreateInfo& createInfo,
6437  VmaAllocation allocation);
6438  void RecordFreeMemory(uint32_t frameIndex,
6439  VmaAllocation allocation);
6440  void RecordFreeMemoryPages(uint32_t frameIndex,
6441  uint64_t allocationCount,
6442  const VmaAllocation* pAllocations);
6443  void RecordResizeAllocation(
6444  uint32_t frameIndex,
6445  VmaAllocation allocation,
6446  VkDeviceSize newSize);
6447  void RecordSetAllocationUserData(uint32_t frameIndex,
6448  VmaAllocation allocation,
6449  const void* pUserData);
6450  void RecordCreateLostAllocation(uint32_t frameIndex,
6451  VmaAllocation allocation);
6452  void RecordMapMemory(uint32_t frameIndex,
6453  VmaAllocation allocation);
6454  void RecordUnmapMemory(uint32_t frameIndex,
6455  VmaAllocation allocation);
6456  void RecordFlushAllocation(uint32_t frameIndex,
6457  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6458  void RecordInvalidateAllocation(uint32_t frameIndex,
6459  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6460  void RecordCreateBuffer(uint32_t frameIndex,
6461  const VkBufferCreateInfo& bufCreateInfo,
6462  const VmaAllocationCreateInfo& allocCreateInfo,
6463  VmaAllocation allocation);
6464  void RecordCreateImage(uint32_t frameIndex,
6465  const VkImageCreateInfo& imageCreateInfo,
6466  const VmaAllocationCreateInfo& allocCreateInfo,
6467  VmaAllocation allocation);
6468  void RecordDestroyBuffer(uint32_t frameIndex,
6469  VmaAllocation allocation);
6470  void RecordDestroyImage(uint32_t frameIndex,
6471  VmaAllocation allocation);
6472  void RecordTouchAllocation(uint32_t frameIndex,
6473  VmaAllocation allocation);
6474  void RecordGetAllocationInfo(uint32_t frameIndex,
6475  VmaAllocation allocation);
6476  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6477  VmaPool pool);
6478  void RecordDefragmentationBegin(uint32_t frameIndex,
6479  const VmaDefragmentationInfo2& info,
6480  VmaDefragmentationContext ctx);
6481  void RecordDefragmentationEnd(uint32_t frameIndex,
6482  VmaDefragmentationContext ctx);
6483 
6484 private:
6485  struct CallParams
6486  {
6487  uint32_t threadId;
6488  double time;
6489  };
6490 
6491  class UserDataString
6492  {
6493  public:
6494  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6495  const char* GetString() const { return m_Str; }
6496 
6497  private:
6498  char m_PtrStr[17];
6499  const char* m_Str;
6500  };
6501 
6502  bool m_UseMutex;
6503  VmaRecordFlags m_Flags;
6504  FILE* m_File;
6505  VMA_MUTEX m_FileMutex;
6506  int64_t m_Freq;
6507  int64_t m_StartCounter;
6508 
6509  void GetBasicParams(CallParams& outParams);
6510 
6511  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6512  template<typename T>
6513  void PrintPointerList(uint64_t count, const T* pItems)
6514  {
6515  if(count)
6516  {
6517  fprintf(m_File, "%p", pItems[0]);
6518  for(uint64_t i = 1; i < count; ++i)
6519  {
6520  fprintf(m_File, " %p", pItems[i]);
6521  }
6522  }
6523  }
6524 
6525  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6526  void Flush();
6527 };
6528 
6529 #endif // #if VMA_RECORDING_ENABLED
6530 
6531 // Main allocator object.
6532 struct VmaAllocator_T
6533 {
6534  VMA_CLASS_NO_COPY(VmaAllocator_T)
6535 public:
6536  bool m_UseMutex;
6537  bool m_UseKhrDedicatedAllocation;
6538  VkDevice m_hDevice;
6539  bool m_AllocationCallbacksSpecified;
6540  VkAllocationCallbacks m_AllocationCallbacks;
6541  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6542 
6543  // Number of bytes still available within the user-defined limit for that heap, or VK_WHOLE_SIZE if the heap has no limit.
6544  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6545  VMA_MUTEX m_HeapSizeLimitMutex;
6546 
6547  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6548  VkPhysicalDeviceMemoryProperties m_MemProps;
6549 
6550  // Default pools.
6551  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6552 
6553  // Each vector is sorted by memory (handle value).
6554  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6555  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6556  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6557 
6558  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6559  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6560  ~VmaAllocator_T();
6561 
6562  const VkAllocationCallbacks* GetAllocationCallbacks() const
6563  {
6564  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6565  }
6566  const VmaVulkanFunctions& GetVulkanFunctions() const
6567  {
6568  return m_VulkanFunctions;
6569  }
6570 
6571  VkDeviceSize GetBufferImageGranularity() const
6572  {
6573  return VMA_MAX(
6574  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6575  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6576  }
6577 
6578  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6579  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6580 
6581  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6582  {
6583  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6584  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6585  }
6586  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6587  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6588  {
6589  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6590  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6591  }
6592  // Minimum alignment for all allocations in specific memory type.
6593  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6594  {
6595  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6596  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6597  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6598  }
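// Worked example (illustrative, not from the original source): if
// limits.nonCoherentAtomSize is 64 and VMA_DEBUG_ALIGNMENT is 1, allocations
// in a HOST_VISIBLE but non-HOST_COHERENT memory type receive a minimum
// alignment of 64, so flush/invalidate ranges can be rounded to whole atoms
// without overlapping a neighboring allocation.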
6599 
6600  bool IsIntegratedGpu() const
6601  {
6602  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6603  }
6604 
6605 #if VMA_RECORDING_ENABLED
6606  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6607 #endif
6608 
6609  void GetBufferMemoryRequirements(
6610  VkBuffer hBuffer,
6611  VkMemoryRequirements& memReq,
6612  bool& requiresDedicatedAllocation,
6613  bool& prefersDedicatedAllocation) const;
6614  void GetImageMemoryRequirements(
6615  VkImage hImage,
6616  VkMemoryRequirements& memReq,
6617  bool& requiresDedicatedAllocation,
6618  bool& prefersDedicatedAllocation) const;
6619 
6620  // Main allocation function.
6621  VkResult AllocateMemory(
6622  const VkMemoryRequirements& vkMemReq,
6623  bool requiresDedicatedAllocation,
6624  bool prefersDedicatedAllocation,
6625  VkBuffer dedicatedBuffer,
6626  VkImage dedicatedImage,
6627  const VmaAllocationCreateInfo& createInfo,
6628  VmaSuballocationType suballocType,
6629  size_t allocationCount,
6630  VmaAllocation* pAllocations);
6631 
6632  // Main deallocation function.
6633  void FreeMemory(
6634  size_t allocationCount,
6635  const VmaAllocation* pAllocations);
6636 
6637  VkResult ResizeAllocation(
6638  const VmaAllocation alloc,
6639  VkDeviceSize newSize);
6640 
6641  void CalculateStats(VmaStats* pStats);
6642 
6643 #if VMA_STATS_STRING_ENABLED
6644  void PrintDetailedMap(class VmaJsonWriter& json);
6645 #endif
6646 
6647  VkResult DefragmentationBegin(
6648  const VmaDefragmentationInfo2& info,
6649  VmaDefragmentationStats* pStats,
6650  VmaDefragmentationContext* pContext);
6651  VkResult DefragmentationEnd(
6652  VmaDefragmentationContext context);
6653 
6654  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6655  bool TouchAllocation(VmaAllocation hAllocation);
6656 
6657  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6658  void DestroyPool(VmaPool pool);
6659  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6660 
6661  void SetCurrentFrameIndex(uint32_t frameIndex);
6662  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6663 
6664  void MakePoolAllocationsLost(
6665  VmaPool hPool,
6666  size_t* pLostAllocationCount);
6667  VkResult CheckPoolCorruption(VmaPool hPool);
6668  VkResult CheckCorruption(uint32_t memoryTypeBits);
6669 
6670  void CreateLostAllocation(VmaAllocation* pAllocation);
6671 
6672  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6673  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6674 
6675  VkResult Map(VmaAllocation hAllocation, void** ppData);
6676  void Unmap(VmaAllocation hAllocation);
6677 
6678  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6679  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6680 
6681  void FlushOrInvalidateAllocation(
6682  VmaAllocation hAllocation,
6683  VkDeviceSize offset, VkDeviceSize size,
6684  VMA_CACHE_OPERATION op);
6685 
6686  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6687 
6688 private:
6689  VkDeviceSize m_PreferredLargeHeapBlockSize;
6690 
6691  VkPhysicalDevice m_PhysicalDevice;
6692  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6693 
6694  VMA_RW_MUTEX m_PoolsMutex;
6695  // Protected by m_PoolsMutex. Sorted by pointer value.
6696  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6697  uint32_t m_NextPoolId;
6698 
6699  VmaVulkanFunctions m_VulkanFunctions;
6700 
6701 #if VMA_RECORDING_ENABLED
6702  VmaRecorder* m_pRecorder;
6703 #endif
6704 
6705  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6706 
6707  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6708 
6709  VkResult AllocateMemoryOfType(
6710  VkDeviceSize size,
6711  VkDeviceSize alignment,
6712  bool dedicatedAllocation,
6713  VkBuffer dedicatedBuffer,
6714  VkImage dedicatedImage,
6715  const VmaAllocationCreateInfo& createInfo,
6716  uint32_t memTypeIndex,
6717  VmaSuballocationType suballocType,
6718  size_t allocationCount,
6719  VmaAllocation* pAllocations);
6720 
6721  // Helper function only to be used inside AllocateDedicatedMemory.
6722  VkResult AllocateDedicatedMemoryPage(
6723  VkDeviceSize size,
6724  VmaSuballocationType suballocType,
6725  uint32_t memTypeIndex,
6726  const VkMemoryAllocateInfo& allocInfo,
6727  bool map,
6728  bool isUserDataString,
6729  void* pUserData,
6730  VmaAllocation* pAllocation);
6731 
6732  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6733  VkResult AllocateDedicatedMemory(
6734  VkDeviceSize size,
6735  VmaSuballocationType suballocType,
6736  uint32_t memTypeIndex,
6737  bool map,
6738  bool isUserDataString,
6739  void* pUserData,
6740  VkBuffer dedicatedBuffer,
6741  VkImage dedicatedImage,
6742  size_t allocationCount,
6743  VmaAllocation* pAllocations);
6744 
6745  // Frees the given allocation, which must have been created as a dedicated allocation and registered in m_pDedicatedAllocations.
6746  void FreeDedicatedMemory(VmaAllocation allocation);
6747 };
6748 
6749 ////////////////////////////////////////////////////////////////////////////////
6750 // Memory allocation #2 after VmaAllocator_T definition
6751 
6752 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6753 {
6754  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6755 }
6756 
6757 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6758 {
6759  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6760 }
6761 
6762 template<typename T>
6763 static T* VmaAllocate(VmaAllocator hAllocator)
6764 {
6765  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6766 }
6767 
6768 template<typename T>
6769 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6770 {
6771  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6772 }
6773 
6774 template<typename T>
6775 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6776 {
6777  if(ptr != VMA_NULL)
6778  {
6779  ptr->~T();
6780  VmaFree(hAllocator, ptr);
6781  }
6782 }
6783 
6784 template<typename T>
6785 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6786 {
6787  if(ptr != VMA_NULL)
6788  {
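// Destroy elements in reverse order of construction.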
6789  for(size_t i = count; i--; )
6790  ptr[i].~T();
6791  VmaFree(hAllocator, ptr);
6792  }
6793 }
6794 
6795 ////////////////////////////////////////////////////////////////////////////////
6796 // VmaStringBuilder
6797 
6798 #if VMA_STATS_STRING_ENABLED
6799 
6800 class VmaStringBuilder
6801 {
6802 public:
6803  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6804  size_t GetLength() const { return m_Data.size(); }
6805  const char* GetData() const { return m_Data.data(); }
6806 
6807  void Add(char ch) { m_Data.push_back(ch); }
6808  void Add(const char* pStr);
6809  void AddNewLine() { Add('\n'); }
6810  void AddNumber(uint32_t num);
6811  void AddNumber(uint64_t num);
6812  void AddPointer(const void* ptr);
6813 
6814 private:
6815  VmaVector< char, VmaStlAllocator<char> > m_Data;
6816 };
6817 
6818 void VmaStringBuilder::Add(const char* pStr)
6819 {
6820  const size_t strLen = strlen(pStr);
6821  if(strLen > 0)
6822  {
6823  const size_t oldCount = m_Data.size();
6824  m_Data.resize(oldCount + strLen);
6825  memcpy(m_Data.data() + oldCount, pStr, strLen);
6826  }
6827 }
6828 
6829 void VmaStringBuilder::AddNumber(uint32_t num)
6830 {
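// 10 decimal digits suffice for any uint32_t, plus the terminating null.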
6831  char buf[11];
6832  VmaUint32ToStr(buf, sizeof(buf), num);
6833  Add(buf);
6834 }
6835 
6836 void VmaStringBuilder::AddNumber(uint64_t num)
6837 {
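// 20 decimal digits suffice for any uint64_t, plus the terminating null.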
6838  char buf[21];
6839  VmaUint64ToStr(buf, sizeof(buf), num);
6840  Add(buf);
6841 }
6842 
6843 void VmaStringBuilder::AddPointer(const void* ptr)
6844 {
6845  char buf[21];
6846  VmaPtrToStr(buf, sizeof(buf), ptr);
6847  Add(buf);
6848 }
6849 
6850 #endif // #if VMA_STATS_STRING_ENABLED
6851 
6852 ////////////////////////////////////////////////////////////////////////////////
6853 // VmaJsonWriter
6854 
6855 #if VMA_STATS_STRING_ENABLED
6856 
6857 class VmaJsonWriter
6858 {
6859  VMA_CLASS_NO_COPY(VmaJsonWriter)
6860 public:
6861  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6862  ~VmaJsonWriter();
6863 
6864  void BeginObject(bool singleLine = false);
6865  void EndObject();
6866 
6867  void BeginArray(bool singleLine = false);
6868  void EndArray();
6869 
6870  void WriteString(const char* pStr);
6871  void BeginString(const char* pStr = VMA_NULL);
6872  void ContinueString(const char* pStr);
6873  void ContinueString(uint32_t n);
6874  void ContinueString(uint64_t n);
6875  void ContinueString_Pointer(const void* ptr);
6876  void EndString(const char* pStr = VMA_NULL);
6877 
6878  void WriteNumber(uint32_t n);
6879  void WriteNumber(uint64_t n);
6880  void WriteBool(bool b);
6881  void WriteNull();
6882 
6883 private:
6884  static const char* const INDENT;
6885 
6886  enum COLLECTION_TYPE
6887  {
6888  COLLECTION_TYPE_OBJECT,
6889  COLLECTION_TYPE_ARRAY,
6890  };
6891  struct StackItem
6892  {
6893  COLLECTION_TYPE type;
6894  uint32_t valueCount;
6895  bool singleLineMode;
6896  };
6897 
6898  VmaStringBuilder& m_SB;
6899  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6900  bool m_InsideString;
6901 
6902  void BeginValue(bool isString);
6903  void WriteIndent(bool oneLess = false);
6904 };
6905 
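// Illustrative usage, not from the original source: producing {"Foo": 1}
// with this writer; `allocator` is assumed to be a valid VmaAllocator.
// Inside an object, each WriteString() key must be followed by exactly one
// value.
//
// VmaStringBuilder sb(allocator);
// {
//     VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//     json.BeginObject();
//     json.WriteString("Foo");
//     json.WriteNumber(1u);
//     json.EndObject();
// } // The destructor asserts that all objects/arrays were closed.
// // sb.GetData()/GetLength() now hold the JSON text.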
6906 const char* const VmaJsonWriter::INDENT = " ";
6907 
6908 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6909  m_SB(sb),
6910  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6911  m_InsideString(false)
6912 {
6913 }
6914 
6915 VmaJsonWriter::~VmaJsonWriter()
6916 {
6917  VMA_ASSERT(!m_InsideString);
6918  VMA_ASSERT(m_Stack.empty());
6919 }
6920 
6921 void VmaJsonWriter::BeginObject(bool singleLine)
6922 {
6923  VMA_ASSERT(!m_InsideString);
6924 
6925  BeginValue(false);
6926  m_SB.Add('{');
6927 
6928  StackItem item;
6929  item.type = COLLECTION_TYPE_OBJECT;
6930  item.valueCount = 0;
6931  item.singleLineMode = singleLine;
6932  m_Stack.push_back(item);
6933 }
6934 
6935 void VmaJsonWriter::EndObject()
6936 {
6937  VMA_ASSERT(!m_InsideString);
6938 
6939  WriteIndent(true);
6940  m_SB.Add('}');
6941 
6942  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6943  m_Stack.pop_back();
6944 }
6945 
6946 void VmaJsonWriter::BeginArray(bool singleLine)
6947 {
6948  VMA_ASSERT(!m_InsideString);
6949 
6950  BeginValue(false);
6951  m_SB.Add('[');
6952 
6953  StackItem item;
6954  item.type = COLLECTION_TYPE_ARRAY;
6955  item.valueCount = 0;
6956  item.singleLineMode = singleLine;
6957  m_Stack.push_back(item);
6958 }
6959 
6960 void VmaJsonWriter::EndArray()
6961 {
6962  VMA_ASSERT(!m_InsideString);
6963 
6964  WriteIndent(true);
6965  m_SB.Add(']');
6966 
6967  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6968  m_Stack.pop_back();
6969 }
6970 
6971 void VmaJsonWriter::WriteString(const char* pStr)
6972 {
6973  BeginString(pStr);
6974  EndString();
6975 }
6976 
6977 void VmaJsonWriter::BeginString(const char* pStr)
6978 {
6979  VMA_ASSERT(!m_InsideString);
6980 
6981  BeginValue(true);
6982  m_SB.Add('"');
6983  m_InsideString = true;
6984  if(pStr != VMA_NULL && pStr[0] != '\0')
6985  {
6986  ContinueString(pStr);
6987  }
6988 }
6989 
6990 void VmaJsonWriter::ContinueString(const char* pStr)
6991 {
6992  VMA_ASSERT(m_InsideString);
6993 
6994  const size_t strLen = strlen(pStr);
6995  for(size_t i = 0; i < strLen; ++i)
6996  {
6997  char ch = pStr[i];
6998  if(ch == '\\')
6999  {
7000  m_SB.Add("\\\\");
7001  }
7002  else if(ch == '"')
7003  {
7004  m_SB.Add("\\\"");
7005  }
7006  else if(ch >= 32)
7007  {
7008  m_SB.Add(ch);
7009  }
7010  else switch(ch)
7011  {
7012  case '\b':
7013  m_SB.Add("\\b");
7014  break;
7015  case '\f':
7016  m_SB.Add("\\f");
7017  break;
7018  case '\n':
7019  m_SB.Add("\\n");
7020  break;
7021  case '\r':
7022  m_SB.Add("\\r");
7023  break;
7024  case '\t':
7025  m_SB.Add("\\t");
7026  break;
7027  default:
7028  VMA_ASSERT(0 && "Character not currently supported.");
7029  break;
7030  }
7031  }
7032 }
7033 
7034 void VmaJsonWriter::ContinueString(uint32_t n)
7035 {
7036  VMA_ASSERT(m_InsideString);
7037  m_SB.AddNumber(n);
7038 }
7039 
7040 void VmaJsonWriter::ContinueString(uint64_t n)
7041 {
7042  VMA_ASSERT(m_InsideString);
7043  m_SB.AddNumber(n);
7044 }
7045 
7046 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7047 {
7048  VMA_ASSERT(m_InsideString);
7049  m_SB.AddPointer(ptr);
7050 }
7051 
7052 void VmaJsonWriter::EndString(const char* pStr)
7053 {
7054  VMA_ASSERT(m_InsideString);
7055  if(pStr != VMA_NULL && pStr[0] != '\0')
7056  {
7057  ContinueString(pStr);
7058  }
7059  m_SB.Add('"');
7060  m_InsideString = false;
7061 }
7062 
7063 void VmaJsonWriter::WriteNumber(uint32_t n)
7064 {
7065  VMA_ASSERT(!m_InsideString);
7066  BeginValue(false);
7067  m_SB.AddNumber(n);
7068 }
7069 
7070 void VmaJsonWriter::WriteNumber(uint64_t n)
7071 {
7072  VMA_ASSERT(!m_InsideString);
7073  BeginValue(false);
7074  m_SB.AddNumber(n);
7075 }
7076 
7077 void VmaJsonWriter::WriteBool(bool b)
7078 {
7079  VMA_ASSERT(!m_InsideString);
7080  BeginValue(false);
7081  m_SB.Add(b ? "true" : "false");
7082 }
7083 
7084 void VmaJsonWriter::WriteNull()
7085 {
7086  VMA_ASSERT(!m_InsideString);
7087  BeginValue(false);
7088  m_SB.Add("null");
7089 }
7090 
7091 void VmaJsonWriter::BeginValue(bool isString)
7092 {
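// Inside an object, entries alternate key/value: an even valueCount means a
// key comes next (keys must be strings, asserted below); an odd valueCount
// means this is the value, so the ": " separator is emitted first.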
7093  if(!m_Stack.empty())
7094  {
7095  StackItem& currItem = m_Stack.back();
7096  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7097  currItem.valueCount % 2 == 0)
7098  {
7099  VMA_ASSERT(isString);
7100  }
7101 
7102  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7103  currItem.valueCount % 2 != 0)
7104  {
7105  m_SB.Add(": ");
7106  }
7107  else if(currItem.valueCount > 0)
7108  {
7109  m_SB.Add(", ");
7110  WriteIndent();
7111  }
7112  else
7113  {
7114  WriteIndent();
7115  }
7116  ++currItem.valueCount;
7117  }
7118 }
7119 
7120 void VmaJsonWriter::WriteIndent(bool oneLess)
7121 {
7122  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7123  {
7124  m_SB.AddNewLine();
7125 
7126  size_t count = m_Stack.size();
7127  if(count > 0 && oneLess)
7128  {
7129  --count;
7130  }
7131  for(size_t i = 0; i < count; ++i)
7132  {
7133  m_SB.Add(INDENT);
7134  }
7135  }
7136 }
7137 
7138 #endif // #if VMA_STATS_STRING_ENABLED
7139 
7140 ////////////////////////////////////////////////////////////////////////////////
7141 
7142 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7143 {
7144  if(IsUserDataString())
7145  {
7146  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7147 
7148  FreeUserDataString(hAllocator);
7149 
7150  if(pUserData != VMA_NULL)
7151  {
7152  const char* const newStrSrc = (char*)pUserData;
7153  const size_t newStrLen = strlen(newStrSrc);
7154  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7155  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7156  m_pUserData = newStrDst;
7157  }
7158  }
7159  else
7160  {
7161  m_pUserData = pUserData;
7162  }
7163 }
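// Illustrative example, not from the original source: when the allocation was
// created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
// IsUserDataString() is true and SetUserData() stores a private copy of the
// string, so the caller's buffer may be released right away.
//
// char name[] = "MyTexture"; // Hypothetical caller-side buffer.
// hAlloc->SetUserData(hAllocator, name);
// // `name` may now go out of scope; the allocation keeps its own copy.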
7164 
7165 void VmaAllocation_T::ChangeBlockAllocation(
7166  VmaAllocator hAllocator,
7167  VmaDeviceMemoryBlock* block,
7168  VkDeviceSize offset)
7169 {
7170  VMA_ASSERT(block != VMA_NULL);
7171  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7172 
7173  // Move mapping reference counter from old block to new block.
7174  if(block != m_BlockAllocation.m_Block)
7175  {
7176  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7177  if(IsPersistentMap())
7178  ++mapRefCount;
7179  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7180  block->Map(hAllocator, mapRefCount, VMA_NULL);
7181  }
7182 
7183  m_BlockAllocation.m_Block = block;
7184  m_BlockAllocation.m_Offset = offset;
7185 }
7186 
7187 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7188 {
7189  VMA_ASSERT(newSize > 0);
7190  m_Size = newSize;
7191 }
7192 
7193 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7194 {
7195  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7196  m_BlockAllocation.m_Offset = newOffset;
7197 }
7198 
7199 VkDeviceSize VmaAllocation_T::GetOffset() const
7200 {
7201  switch(m_Type)
7202  {
7203  case ALLOCATION_TYPE_BLOCK:
7204  return m_BlockAllocation.m_Offset;
7205  case ALLOCATION_TYPE_DEDICATED:
7206  return 0;
7207  default:
7208  VMA_ASSERT(0);
7209  return 0;
7210  }
7211 }
7212 
7213 VkDeviceMemory VmaAllocation_T::GetMemory() const
7214 {
7215  switch(m_Type)
7216  {
7217  case ALLOCATION_TYPE_BLOCK:
7218  return m_BlockAllocation.m_Block->GetDeviceMemory();
7219  case ALLOCATION_TYPE_DEDICATED:
7220  return m_DedicatedAllocation.m_hMemory;
7221  default:
7222  VMA_ASSERT(0);
7223  return VK_NULL_HANDLE;
7224  }
7225 }
7226 
7227 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7228 {
7229  switch(m_Type)
7230  {
7231  case ALLOCATION_TYPE_BLOCK:
7232  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7233  case ALLOCATION_TYPE_DEDICATED:
7234  return m_DedicatedAllocation.m_MemoryTypeIndex;
7235  default:
7236  VMA_ASSERT(0);
7237  return UINT32_MAX;
7238  }
7239 }
7240 
7241 void* VmaAllocation_T::GetMappedData() const
7242 {
7243  switch(m_Type)
7244  {
7245  case ALLOCATION_TYPE_BLOCK:
7246  if(m_MapCount != 0)
7247  {
7248  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7249  VMA_ASSERT(pBlockData != VMA_NULL);
7250  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7251  }
7252  else
7253  {
7254  return VMA_NULL;
7255  }
7256  break;
7257  case ALLOCATION_TYPE_DEDICATED:
7258  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7259  return m_DedicatedAllocation.m_pMappedData;
7260  default:
7261  VMA_ASSERT(0);
7262  return VMA_NULL;
7263  }
7264 }
7265 
7266 bool VmaAllocation_T::CanBecomeLost() const
7267 {
7268  switch(m_Type)
7269  {
7270  case ALLOCATION_TYPE_BLOCK:
7271  return m_BlockAllocation.m_CanBecomeLost;
7272  case ALLOCATION_TYPE_DEDICATED:
7273  return false;
7274  default:
7275  VMA_ASSERT(0);
7276  return false;
7277  }
7278 }
7279 
7280 VmaPool VmaAllocation_T::GetPool() const
7281 {
7282  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7283  return m_BlockAllocation.m_hPool;
7284 }
7285 
7286 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7287 {
7288  VMA_ASSERT(CanBecomeLost());
7289 
7290  /*
7291  Warning: This is a carefully designed algorithm.
7292  Do not modify unless you really know what you're doing :)
7293  */
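// This is a lock-free compare-and-swap retry loop: decisions are made on a
// snapshot of the last-use frame index, and the transition to
// VMA_FRAME_INDEX_LOST is committed only if no other thread has changed the
// index in the meantime; on CAS failure the snapshot is refreshed and we
// retry.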
7294  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7295  for(;;)
7296  {
7297  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7298  {
7299  VMA_ASSERT(0);
7300  return false;
7301  }
7302  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7303  {
7304  return false;
7305  }
7306  else // Last use time earlier than current time.
7307  {
7308  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7309  {
7310  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7311  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7312  return true;
7313  }
7314  }
7315  }
7316 }
7317 
7318 #if VMA_STATS_STRING_ENABLED
7319 
7320 // Entries correspond to the values of enum VmaSuballocationType.
7321 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7322  "FREE",
7323  "UNKNOWN",
7324  "BUFFER",
7325  "IMAGE_UNKNOWN",
7326  "IMAGE_LINEAR",
7327  "IMAGE_OPTIMAL",
7328 };
7329 
7330 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7331 {
7332  json.WriteString("Type");
7333  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7334 
7335  json.WriteString("Size");
7336  json.WriteNumber(m_Size);
7337 
7338  if(m_pUserData != VMA_NULL)
7339  {
7340  json.WriteString("UserData");
7341  if(IsUserDataString())
7342  {
7343  json.WriteString((const char*)m_pUserData);
7344  }
7345  else
7346  {
7347  json.BeginString();
7348  json.ContinueString_Pointer(m_pUserData);
7349  json.EndString();
7350  }
7351  }
7352 
7353  json.WriteString("CreationFrameIndex");
7354  json.WriteNumber(m_CreationFrameIndex);
7355 
7356  json.WriteString("LastUseFrameIndex");
7357  json.WriteNumber(GetLastUseFrameIndex());
7358 
7359  if(m_BufferImageUsage != 0)
7360  {
7361  json.WriteString("Usage");
7362  json.WriteNumber(m_BufferImageUsage);
7363  }
7364 }
7365 
7366 #endif
7367 
7368 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7369 {
7370  VMA_ASSERT(IsUserDataString());
7371  if(m_pUserData != VMA_NULL)
7372  {
7373  char* const oldStr = (char*)m_pUserData;
7374  const size_t oldStrLen = strlen(oldStr);
7375  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7376  m_pUserData = VMA_NULL;
7377  }
7378 }
7379 
7380 void VmaAllocation_T::BlockAllocMap()
7381 {
7382  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7383 
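// The low 7 bits of m_MapCount hold the map reference count; the top bit is
// MAP_COUNT_FLAG_PERSISTENT_MAP, hence the cap of 0x7F simultaneous maps.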
7384  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7385  {
7386  ++m_MapCount;
7387  }
7388  else
7389  {
7390  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7391  }
7392 }
7393 
7394 void VmaAllocation_T::BlockAllocUnmap()
7395 {
7396  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7397 
7398  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7399  {
7400  --m_MapCount;
7401  }
7402  else
7403  {
7404  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7405  }
7406 }
7407 
7408 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7409 {
7410  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7411 
7412  if(m_MapCount != 0)
7413  {
7414  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7415  {
7416  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7417  *ppData = m_DedicatedAllocation.m_pMappedData;
7418  ++m_MapCount;
7419  return VK_SUCCESS;
7420  }
7421  else
7422  {
7423  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7424  return VK_ERROR_MEMORY_MAP_FAILED;
7425  }
7426  }
7427  else
7428  {
7429  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7430  hAllocator->m_hDevice,
7431  m_DedicatedAllocation.m_hMemory,
7432  0, // offset
7433  VK_WHOLE_SIZE,
7434  0, // flags
7435  ppData);
7436  if(result == VK_SUCCESS)
7437  {
7438  m_DedicatedAllocation.m_pMappedData = *ppData;
7439  m_MapCount = 1;
7440  }
7441  return result;
7442  }
7443 }
7444 
7445 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7446 {
7447  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7448 
7449  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7450  {
7451  --m_MapCount;
7452  if(m_MapCount == 0)
7453  {
7454  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7455  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7456  hAllocator->m_hDevice,
7457  m_DedicatedAllocation.m_hMemory);
7458  }
7459  }
7460  else
7461  {
7462  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7463  }
7464 }
7465 
7466 #if VMA_STATS_STRING_ENABLED
7467 
7468 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7469 {
7470  json.BeginObject();
7471 
7472  json.WriteString("Blocks");
7473  json.WriteNumber(stat.blockCount);
7474 
7475  json.WriteString("Allocations");
7476  json.WriteNumber(stat.allocationCount);
7477 
7478  json.WriteString("UnusedRanges");
7479  json.WriteNumber(stat.unusedRangeCount);
7480 
7481  json.WriteString("UsedBytes");
7482  json.WriteNumber(stat.usedBytes);
7483 
7484  json.WriteString("UnusedBytes");
7485  json.WriteNumber(stat.unusedBytes);
7486 
7487  if(stat.allocationCount > 1)
7488  {
7489  json.WriteString("AllocationSize");
7490  json.BeginObject(true);
7491  json.WriteString("Min");
7492  json.WriteNumber(stat.allocationSizeMin);
7493  json.WriteString("Avg");
7494  json.WriteNumber(stat.allocationSizeAvg);
7495  json.WriteString("Max");
7496  json.WriteNumber(stat.allocationSizeMax);
7497  json.EndObject();
7498  }
7499 
7500  if(stat.unusedRangeCount > 1)
7501  {
7502  json.WriteString("UnusedRangeSize");
7503  json.BeginObject(true);
7504  json.WriteString("Min");
7505  json.WriteNumber(stat.unusedRangeSizeMin);
7506  json.WriteString("Avg");
7507  json.WriteNumber(stat.unusedRangeSizeAvg);
7508  json.WriteString("Max");
7509  json.WriteNumber(stat.unusedRangeSizeMax);
7510  json.EndObject();
7511  }
7512 
7513  json.EndObject();
7514 }
7515 
7516 #endif // #if VMA_STATS_STRING_ENABLED
7517 
7518 struct VmaSuballocationItemSizeLess
7519 {
7520  bool operator()(
7521  const VmaSuballocationList::iterator lhs,
7522  const VmaSuballocationList::iterator rhs) const
7523  {
7524  return lhs->size < rhs->size;
7525  }
7526  bool operator()(
7527  const VmaSuballocationList::iterator lhs,
7528  VkDeviceSize rhsSize) const
7529  {
7530  return lhs->size < rhsSize;
7531  }
7532 };
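// Providing both iterator/iterator and iterator/size overloads lets this
// comparator drive lower_bound-style binary searches over the size-sorted
// free list (see VmaBinaryFindFirstNotLess below) as well as ordinary
// sorting.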
7533 
7534 
7535 ////////////////////////////////////////////////////////////////////////////////
7536 // class VmaBlockMetadata
7537 
7538 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7539  m_Size(0),
7540  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7541 {
7542 }
7543 
7544 #if VMA_STATS_STRING_ENABLED
7545 
7546 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7547  VkDeviceSize unusedBytes,
7548  size_t allocationCount,
7549  size_t unusedRangeCount) const
7550 {
7551  json.BeginObject();
7552 
7553  json.WriteString("TotalBytes");
7554  json.WriteNumber(GetSize());
7555 
7556  json.WriteString("UnusedBytes");
7557  json.WriteNumber(unusedBytes);
7558 
7559  json.WriteString("Allocations");
7560  json.WriteNumber((uint64_t)allocationCount);
7561 
7562  json.WriteString("UnusedRanges");
7563  json.WriteNumber((uint64_t)unusedRangeCount);
7564 
7565  json.WriteString("Suballocations");
7566  json.BeginArray();
7567 }
7568 
7569 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7570  VkDeviceSize offset,
7571  VmaAllocation hAllocation) const
7572 {
7573  json.BeginObject(true);
7574 
7575  json.WriteString("Offset");
7576  json.WriteNumber(offset);
7577 
7578  hAllocation->PrintParameters(json);
7579 
7580  json.EndObject();
7581 }
7582 
7583 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7584  VkDeviceSize offset,
7585  VkDeviceSize size) const
7586 {
7587  json.BeginObject(true);
7588 
7589  json.WriteString("Offset");
7590  json.WriteNumber(offset);
7591 
7592  json.WriteString("Type");
7593  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7594 
7595  json.WriteString("Size");
7596  json.WriteNumber(size);
7597 
7598  json.EndObject();
7599 }
7600 
7601 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7602 {
7603  json.EndArray();
7604  json.EndObject();
7605 }
7606 
7607 #endif // #if VMA_STATS_STRING_ENABLED
7608 
7609 ////////////////////////////////////////////////////////////////////////////////
7610 // class VmaBlockMetadata_Generic
7611 
7612 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7613  VmaBlockMetadata(hAllocator),
7614  m_FreeCount(0),
7615  m_SumFreeSize(0),
7616  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7617  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7618 {
7619 }
7620 
7621 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7622 {
7623 }
7624 
7625 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7626 {
7627  VmaBlockMetadata::Init(size);
7628 
7629  m_FreeCount = 1;
7630  m_SumFreeSize = size;
7631 
7632  VmaSuballocation suballoc = {};
7633  suballoc.offset = 0;
7634  suballoc.size = size;
7635  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7636  suballoc.hAllocation = VK_NULL_HANDLE;
7637 
7638  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7639  m_Suballocations.push_back(suballoc);
7640  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7641  --suballocItem;
7642  m_FreeSuballocationsBySize.push_back(suballocItem);
7643 }
7644 
7645 bool VmaBlockMetadata_Generic::Validate() const
7646 {
7647  VMA_VALIDATE(!m_Suballocations.empty());
7648 
7649  // Expected offset of new suballocation as calculated from previous ones.
7650  VkDeviceSize calculatedOffset = 0;
7651  // Expected number of free suballocations as calculated from traversing their list.
7652  uint32_t calculatedFreeCount = 0;
7653  // Expected sum size of free suballocations as calculated from traversing their list.
7654  VkDeviceSize calculatedSumFreeSize = 0;
7655  // Expected number of free suballocations that should be registered in
7656  // m_FreeSuballocationsBySize calculated from traversing their list.
7657  size_t freeSuballocationsToRegister = 0;
7658  // True if previous visited suballocation was free.
7659  bool prevFree = false;
7660 
7661  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7662  suballocItem != m_Suballocations.cend();
7663  ++suballocItem)
7664  {
7665  const VmaSuballocation& subAlloc = *suballocItem;
7666 
7667  // Actual offset of this suballocation doesn't match expected one.
7668  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7669 
7670  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7671  // Two adjacent free suballocations are invalid. They should be merged.
7672  VMA_VALIDATE(!prevFree || !currFree);
7673 
7674  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7675 
7676  if(currFree)
7677  {
7678  calculatedSumFreeSize += subAlloc.size;
7679  ++calculatedFreeCount;
7680  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7681  {
7682  ++freeSuballocationsToRegister;
7683  }
7684 
7685  // Margin required between allocations - every free range must be at least that large.
7686  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7687  }
7688  else
7689  {
7690  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7691  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7692 
7693  // Margin required between allocations - previous allocation must be free.
7694  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7695  }
7696 
7697  calculatedOffset += subAlloc.size;
7698  prevFree = currFree;
7699  }
7700 
7701  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7702  // match expected one.
7703  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7704 
7705  VkDeviceSize lastSize = 0;
7706  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7707  {
7708  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7709 
7710  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7711  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7712  // They must be sorted by size ascending.
7713  VMA_VALIDATE(suballocItem->size >= lastSize);
7714 
7715  lastSize = suballocItem->size;
7716  }
7717 
7718  // Check if totals match calculated values.
7719  VMA_VALIDATE(ValidateFreeSuballocationList());
7720  VMA_VALIDATE(calculatedOffset == GetSize());
7721  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7722  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7723 
7724  return true;
7725 }
7726 
7727 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7728 {
7729  if(!m_FreeSuballocationsBySize.empty())
7730  {
7731  return m_FreeSuballocationsBySize.back()->size;
7732  }
7733  else
7734  {
7735  return 0;
7736  }
7737 }
7738 
7739 bool VmaBlockMetadata_Generic::IsEmpty() const
7740 {
7741  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7742 }
7743 
7744 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7745 {
7746  outInfo.blockCount = 1;
7747 
7748  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7749  outInfo.allocationCount = rangeCount - m_FreeCount;
7750  outInfo.unusedRangeCount = m_FreeCount;
7751 
7752  outInfo.unusedBytes = m_SumFreeSize;
7753  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7754 
7755  outInfo.allocationSizeMin = UINT64_MAX;
7756  outInfo.allocationSizeMax = 0;
7757  outInfo.unusedRangeSizeMin = UINT64_MAX;
7758  outInfo.unusedRangeSizeMax = 0;
7759 
7760  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7761  suballocItem != m_Suballocations.cend();
7762  ++suballocItem)
7763  {
7764  const VmaSuballocation& suballoc = *suballocItem;
7765  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7766  {
7767  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7768  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7769  }
7770  else
7771  {
7772  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7773  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7774  }
7775  }
7776 }
7777 
7778 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7779 {
7780  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7781 
7782  inoutStats.size += GetSize();
7783  inoutStats.unusedSize += m_SumFreeSize;
7784  inoutStats.allocationCount += rangeCount - m_FreeCount;
7785  inoutStats.unusedRangeCount += m_FreeCount;
7786  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7787 }
7788 
7789 #if VMA_STATS_STRING_ENABLED
7790 
7791 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7792 {
7793  PrintDetailedMap_Begin(json,
7794  m_SumFreeSize, // unusedBytes
7795  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7796  m_FreeCount); // unusedRangeCount
7797 
7798  size_t i = 0;
7799  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7800  suballocItem != m_Suballocations.cend();
7801  ++suballocItem, ++i)
7802  {
7803  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7804  {
7805  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7806  }
7807  else
7808  {
7809  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7810  }
7811  }
7812 
7813  PrintDetailedMap_End(json);
7814 }
7815 
7816 #endif // #if VMA_STATS_STRING_ENABLED
7817 
7818 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7819  uint32_t currentFrameIndex,
7820  uint32_t frameInUseCount,
7821  VkDeviceSize bufferImageGranularity,
7822  VkDeviceSize allocSize,
7823  VkDeviceSize allocAlignment,
7824  bool upperAddress,
7825  VmaSuballocationType allocType,
7826  bool canMakeOtherLost,
7827  uint32_t strategy,
7828  VmaAllocationRequest* pAllocationRequest)
7829 {
7830  VMA_ASSERT(allocSize > 0);
7831  VMA_ASSERT(!upperAddress);
7832  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7833  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7834  VMA_HEAVY_ASSERT(Validate());
7835 
7836  // There is not enough total free space in this block to fulfill the request: Early return.
7837  if(canMakeOtherLost == false &&
7838  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7839  {
7840  return false;
7841  }
7842 
7843  // New algorithm, efficiently searching freeSuballocationsBySize.
7844  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7845  if(freeSuballocCount > 0)
7846  {
7847  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7848  {
7849  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7850  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7851  m_FreeSuballocationsBySize.data(),
7852  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7853  allocSize + 2 * VMA_DEBUG_MARGIN,
7854  VmaSuballocationItemSizeLess());
7855  size_t index = it - m_FreeSuballocationsBySize.data();
7856  for(; index < freeSuballocCount; ++index)
7857  {
7858  if(CheckAllocation(
7859  currentFrameIndex,
7860  frameInUseCount,
7861  bufferImageGranularity,
7862  allocSize,
7863  allocAlignment,
7864  allocType,
7865  m_FreeSuballocationsBySize[index],
7866  false, // canMakeOtherLost
7867  &pAllocationRequest->offset,
7868  &pAllocationRequest->itemsToMakeLostCount,
7869  &pAllocationRequest->sumFreeSize,
7870  &pAllocationRequest->sumItemSize))
7871  {
7872  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7873  return true;
7874  }
7875  }
7876  }
7877  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7878  {
7879  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7880  it != m_Suballocations.end();
7881  ++it)
7882  {
7883  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7884  currentFrameIndex,
7885  frameInUseCount,
7886  bufferImageGranularity,
7887  allocSize,
7888  allocAlignment,
7889  allocType,
7890  it,
7891  false, // canMakeOtherLost
7892  &pAllocationRequest->offset,
7893  &pAllocationRequest->itemsToMakeLostCount,
7894  &pAllocationRequest->sumFreeSize,
7895  &pAllocationRequest->sumItemSize))
7896  {
7897  pAllocationRequest->item = it;
7898  return true;
7899  }
7900  }
7901  }
7902  else // WORST_FIT, FIRST_FIT
7903  {
7904  // Search starting from the biggest suballocations.
7905  for(size_t index = freeSuballocCount; index--; )
7906  {
7907  if(CheckAllocation(
7908  currentFrameIndex,
7909  frameInUseCount,
7910  bufferImageGranularity,
7911  allocSize,
7912  allocAlignment,
7913  allocType,
7914  m_FreeSuballocationsBySize[index],
7915  false, // canMakeOtherLost
7916  &pAllocationRequest->offset,
7917  &pAllocationRequest->itemsToMakeLostCount,
7918  &pAllocationRequest->sumFreeSize,
7919  &pAllocationRequest->sumItemSize))
7920  {
7921  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7922  return true;
7923  }
7924  }
7925  }
7926  }
7927 
7928  if(canMakeOtherLost)
7929  {
7930  // Brute-force algorithm. TODO: Come up with something better.
7931 
7932  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7933  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7934 
7935  VmaAllocationRequest tmpAllocRequest = {};
7936  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7937  suballocIt != m_Suballocations.end();
7938  ++suballocIt)
7939  {
7940  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7941  suballocIt->hAllocation->CanBecomeLost())
7942  {
7943  if(CheckAllocation(
7944  currentFrameIndex,
7945  frameInUseCount,
7946  bufferImageGranularity,
7947  allocSize,
7948  allocAlignment,
7949  allocType,
7950  suballocIt,
7951  canMakeOtherLost,
7952  &tmpAllocRequest.offset,
7953  &tmpAllocRequest.itemsToMakeLostCount,
7954  &tmpAllocRequest.sumFreeSize,
7955  &tmpAllocRequest.sumItemSize))
7956  {
7957  tmpAllocRequest.item = suballocIt;
7958 
7959  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7960  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7961  {
7962  *pAllocationRequest = tmpAllocRequest;
7963  }
7964  }
7965  }
7966  }
7967 
7968  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7969  {
7970  return true;
7971  }
7972  }
7973 
7974  return false;
7975 }
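// Summary of the strategy dispatch above: BEST_FIT binary-searches the
// size-sorted free list and takes the smallest range that fits; MIN_OFFSET
// (used internally by defragmentation) walks suballocations in address order
// and takes the lowest-offset fit; the default WORST_FIT/FIRST_FIT path walks
// the size-sorted list from the largest range downward. The canMakeOtherLost
// fallback is a brute-force scan that also counts allocations which would
// have to be marked lost, preferring the candidate with the lowest
// CalcCost().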
7976 
7977 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7978  uint32_t currentFrameIndex,
7979  uint32_t frameInUseCount,
7980  VmaAllocationRequest* pAllocationRequest)
7981 {
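// The request's item may point at a free range whose lost-able neighbors
// follow it, so free items are skipped and the next item must be a live
// allocation that can become lost (asserted below). Freeing a lost
// allocation may merge adjacent free ranges.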
7982  while(pAllocationRequest->itemsToMakeLostCount > 0)
7983  {
7984  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7985  {
7986  ++pAllocationRequest->item;
7987  }
7988  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7989  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7990  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7991  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7992  {
7993  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7994  --pAllocationRequest->itemsToMakeLostCount;
7995  }
7996  else
7997  {
7998  return false;
7999  }
8000  }
8001 
8002  VMA_HEAVY_ASSERT(Validate());
8003  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8004  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8005 
8006  return true;
8007 }
8008 
8009 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8010 {
8011  uint32_t lostAllocationCount = 0;
8012  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8013  it != m_Suballocations.end();
8014  ++it)
8015  {
8016  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8017  it->hAllocation->CanBecomeLost() &&
8018  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8019  {
8020  it = FreeSuballocation(it);
8021  ++lostAllocationCount;
8022  }
8023  }
8024  return lostAllocationCount;
8025 }
8026 
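// Corruption detection relies on VMA_DEBUG_MARGIN bytes around every used
// suballocation being filled with a known magic pattern at allocation time;
// this function re-validates that pattern immediately before and immediately
// after each allocation.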
8027 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8028 {
8029  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8030  it != m_Suballocations.end();
8031  ++it)
8032  {
8033  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8034  {
8035  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8036  {
8037  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8038  return VK_ERROR_VALIDATION_FAILED_EXT;
8039  }
8040  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8041  {
8042  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8043  return VK_ERROR_VALIDATION_FAILED_EXT;
8044  }
8045  }
8046  }
8047 
8048  return VK_SUCCESS;
8049 }
8050 
8051 void VmaBlockMetadata_Generic::Alloc(
8052  const VmaAllocationRequest& request,
8053  VmaSuballocationType type,
8054  VkDeviceSize allocSize,
8055  bool upperAddress,
8056  VmaAllocation hAllocation)
8057 {
8058  VMA_ASSERT(!upperAddress);
8059  VMA_ASSERT(request.item != m_Suballocations.end());
8060  VmaSuballocation& suballoc = *request.item;
8061  // Given suballocation is a free block.
8062  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8063  // Given offset is inside this suballocation.
8064  VMA_ASSERT(request.offset >= suballoc.offset);
8065  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8066  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8067  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8068 
8069  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8070  // it to become used.
8071  UnregisterFreeSuballocation(request.item);
8072 
8073  suballoc.offset = request.offset;
8074  suballoc.size = allocSize;
8075  suballoc.type = type;
8076  suballoc.hAllocation = hAllocation;
8077 
8078  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8079  if(paddingEnd)
8080  {
8081  VmaSuballocation paddingSuballoc = {};
8082  paddingSuballoc.offset = request.offset + allocSize;
8083  paddingSuballoc.size = paddingEnd;
8084  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8085  VmaSuballocationList::iterator next = request.item;
8086  ++next;
8087  const VmaSuballocationList::iterator paddingEndItem =
8088  m_Suballocations.insert(next, paddingSuballoc);
8089  RegisterFreeSuballocation(paddingEndItem);
8090  }
8091 
8092  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8093  if(paddingBegin)
8094  {
8095  VmaSuballocation paddingSuballoc = {};
8096  paddingSuballoc.offset = request.offset - paddingBegin;
8097  paddingSuballoc.size = paddingBegin;
8098  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8099  const VmaSuballocationList::iterator paddingBeginItem =
8100  m_Suballocations.insert(request.item, paddingSuballoc);
8101  RegisterFreeSuballocation(paddingBeginItem);
8102  }
8103 
8104  // Update totals.
8105  m_FreeCount = m_FreeCount - 1;
8106  if(paddingBegin > 0)
8107  {
8108  ++m_FreeCount;
8109  }
8110  if(paddingEnd > 0)
8111  {
8112  ++m_FreeCount;
8113  }
8114  m_SumFreeSize -= allocSize;
8115 }
8116 
8117 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8118 {
8119  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8120  suballocItem != m_Suballocations.end();
8121  ++suballocItem)
8122  {
8123  VmaSuballocation& suballoc = *suballocItem;
8124  if(suballoc.hAllocation == allocation)
8125  {
8126  FreeSuballocation(suballocItem);
8127  VMA_HEAVY_ASSERT(Validate());
8128  return;
8129  }
8130  }
8131  VMA_ASSERT(0 && "Not found!");
8132 }
8133 
8134 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8135 {
8136  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8137  suballocItem != m_Suballocations.end();
8138  ++suballocItem)
8139  {
8140  VmaSuballocation& suballoc = *suballocItem;
8141  if(suballoc.offset == offset)
8142  {
8143  FreeSuballocation(suballocItem);
8144  return;
8145  }
8146  }
8147  VMA_ASSERT(0 && "Not found!");
8148 }
8149 
8150 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8151 {
8152  typedef VmaSuballocationList::iterator iter_type;
8153  for(iter_type suballocItem = m_Suballocations.begin();
8154  suballocItem != m_Suballocations.end();
8155  ++suballocItem)
8156  {
8157  VmaSuballocation& suballoc = *suballocItem;
8158  if(suballoc.hAllocation == alloc)
8159  {
8160  iter_type nextItem = suballocItem;
8161  ++nextItem;
8162 
8163  // Should have been ensured on higher level.
8164  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8165 
8166  // Shrinking.
8167  if(newSize < alloc->GetSize())
8168  {
8169  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8170 
8171  // There is next item.
8172  if(nextItem != m_Suballocations.end())
8173  {
8174  // Next item is free.
8175  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8176  {
8177  // Grow this next item backward.
8178  UnregisterFreeSuballocation(nextItem);
8179  nextItem->offset -= sizeDiff;
8180  nextItem->size += sizeDiff;
8181  RegisterFreeSuballocation(nextItem);
8182  }
8183  // Next item is not free.
8184  else
8185  {
8186  // Create free item after current one.
8187  VmaSuballocation newFreeSuballoc;
8188  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8189  newFreeSuballoc.offset = suballoc.offset + newSize;
8190  newFreeSuballoc.size = sizeDiff;
8191  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8192  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8193  RegisterFreeSuballocation(newFreeSuballocIt);
8194 
8195  ++m_FreeCount;
8196  }
8197  }
8198  // This is the last item.
8199  else
8200  {
8201  // Create free item at the end.
8202  VmaSuballocation newFreeSuballoc;
8203  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8204  newFreeSuballoc.offset = suballoc.offset + newSize;
8205  newFreeSuballoc.size = sizeDiff;
8206  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8207  m_Suballocations.push_back(newFreeSuballoc);
8208 
8209  iter_type newFreeSuballocIt = m_Suballocations.end();
8210  RegisterFreeSuballocation(--newFreeSuballocIt);
8211 
8212  ++m_FreeCount;
8213  }
8214 
8215  suballoc.size = newSize;
8216  m_SumFreeSize += sizeDiff;
8217  }
8218  // Growing.
8219  else
8220  {
8221  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8222 
8223  // There is next item.
8224  if(nextItem != m_Suballocations.end())
8225  {
8226  // Next item is free.
8227  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8228  {
8229  // There is not enough free space, including margin.
8230  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8231  {
8232  return false;
8233  }
8234 
8235  // There is more free space than required.
8236  if(nextItem->size > sizeDiff)
8237  {
8238  // Move and shrink this next item.
8239  UnregisterFreeSuballocation(nextItem);
8240  nextItem->offset += sizeDiff;
8241  nextItem->size -= sizeDiff;
8242  RegisterFreeSuballocation(nextItem);
8243  }
8244  // There is exactly the amount of free space required.
8245  else
8246  {
8247  // Remove this next free item.
8248  UnregisterFreeSuballocation(nextItem);
8249  m_Suballocations.erase(nextItem);
8250  --m_FreeCount;
8251  }
8252  }
8253  // Next item is not free - there is no space to grow.
8254  else
8255  {
8256  return false;
8257  }
8258  }
8259  // This is the last item - there is no space to grow.
8260  else
8261  {
8262  return false;
8263  }
8264 
8265  suballoc.size = newSize;
8266  m_SumFreeSize -= sizeDiff;
8267  }
8268 
8269  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8270  return true;
8271  }
8272  }
8273  VMA_ASSERT(0 && "Not found!");
8274  return false;
8275 }
8276 
8277 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8278 {
8279  VkDeviceSize lastSize = 0;
8280  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8281  {
8282  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8283 
8284  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8285  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8286  VMA_VALIDATE(it->size >= lastSize);
8287  lastSize = it->size;
8288  }
8289  return true;
8290 }
8291 
8292 bool VmaBlockMetadata_Generic::CheckAllocation(
8293  uint32_t currentFrameIndex,
8294  uint32_t frameInUseCount,
8295  VkDeviceSize bufferImageGranularity,
8296  VkDeviceSize allocSize,
8297  VkDeviceSize allocAlignment,
8298  VmaSuballocationType allocType,
8299  VmaSuballocationList::const_iterator suballocItem,
8300  bool canMakeOtherLost,
8301  VkDeviceSize* pOffset,
8302  size_t* itemsToMakeLostCount,
8303  VkDeviceSize* pSumFreeSize,
8304  VkDeviceSize* pSumItemSize) const
8305 {
8306  VMA_ASSERT(allocSize > 0);
8307  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8308  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8309  VMA_ASSERT(pOffset != VMA_NULL);
8310 
8311  *itemsToMakeLostCount = 0;
8312  *pSumFreeSize = 0;
8313  *pSumItemSize = 0;
8314 
8315  if(canMakeOtherLost)
8316  {
8317  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8318  {
8319  *pSumFreeSize = suballocItem->size;
8320  }
8321  else
8322  {
8323  if(suballocItem->hAllocation->CanBecomeLost() &&
8324  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8325  {
8326  ++*itemsToMakeLostCount;
8327  *pSumItemSize = suballocItem->size;
8328  }
8329  else
8330  {
8331  return false;
8332  }
8333  }
8334 
8335  // Remaining size is too small for this request: Early return.
8336  if(GetSize() - suballocItem->offset < allocSize)
8337  {
8338  return false;
8339  }
8340 
8341  // Start from offset equal to beginning of this suballocation.
8342  *pOffset = suballocItem->offset;
8343 
8344  // Apply VMA_DEBUG_MARGIN at the beginning.
8345  if(VMA_DEBUG_MARGIN > 0)
8346  {
8347  *pOffset += VMA_DEBUG_MARGIN;
8348  }
8349 
8350  // Apply alignment.
8351  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8352 
8353  // Check previous suballocations for BufferImageGranularity conflicts.
8354  // Make bigger alignment if necessary.
8355  if(bufferImageGranularity > 1)
8356  {
8357  bool bufferImageGranularityConflict = false;
8358  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8359  while(prevSuballocItem != m_Suballocations.cbegin())
8360  {
8361  --prevSuballocItem;
8362  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8363  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8364  {
8365  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8366  {
8367  bufferImageGranularityConflict = true;
8368  break;
8369  }
8370  }
8371  else
8372  // Already on previous page.
8373  break;
8374  }
8375  if(bufferImageGranularityConflict)
8376  {
8377  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8378  }
8379  }
8380 
8381  // Now that we have final *pOffset, check if we are past suballocItem.
8382  // If yes, return false - this function should be called for another suballocItem as starting point.
8383  if(*pOffset >= suballocItem->offset + suballocItem->size)
8384  {
8385  return false;
8386  }
8387 
8388  // Calculate padding at the beginning based on current offset.
8389  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8390 
8391  // Calculate required margin at the end.
8392  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8393 
8394  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8395  // Another early return check.
8396  if(suballocItem->offset + totalSize > GetSize())
8397  {
8398  return false;
8399  }
8400 
8401  // Advance lastSuballocItem until desired size is reached.
8402  // Update itemsToMakeLostCount.
8403  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8404  if(totalSize > suballocItem->size)
8405  {
8406  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8407  while(remainingSize > 0)
8408  {
8409  ++lastSuballocItem;
8410  if(lastSuballocItem == m_Suballocations.cend())
8411  {
8412  return false;
8413  }
8414  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8415  {
8416  *pSumFreeSize += lastSuballocItem->size;
8417  }
8418  else
8419  {
8420  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8421  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8422  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8423  {
8424  ++*itemsToMakeLostCount;
8425  *pSumItemSize += lastSuballocItem->size;
8426  }
8427  else
8428  {
8429  return false;
8430  }
8431  }
8432  remainingSize = (lastSuballocItem->size < remainingSize) ?
8433  remainingSize - lastSuballocItem->size : 0;
8434  }
8435  }
8436 
8437  // Check next suballocations for BufferImageGranularity conflicts.
8438  // If conflict exists, we must mark more allocations lost or fail.
8439  if(bufferImageGranularity > 1)
8440  {
8441  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8442  ++nextSuballocItem;
8443  while(nextSuballocItem != m_Suballocations.cend())
8444  {
8445  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8446  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8447  {
8448  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8449  {
8450  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8451  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8452  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8453  {
8454  ++*itemsToMakeLostCount;
8455  }
8456  else
8457  {
8458  return false;
8459  }
8460  }
8461  }
8462  else
8463  {
8464  // Already on next page.
8465  break;
8466  }
8467  ++nextSuballocItem;
8468  }
8469  }
8470  }
8471  else
8472  {
8473  const VmaSuballocation& suballoc = *suballocItem;
8474  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8475 
8476  *pSumFreeSize = suballoc.size;
8477 
8478  // Size of this suballocation is too small for this request: Early return.
8479  if(suballoc.size < allocSize)
8480  {
8481  return false;
8482  }
8483 
8484  // Start from offset equal to beginning of this suballocation.
8485  *pOffset = suballoc.offset;
8486 
8487  // Apply VMA_DEBUG_MARGIN at the beginning.
8488  if(VMA_DEBUG_MARGIN > 0)
8489  {
8490  *pOffset += VMA_DEBUG_MARGIN;
8491  }
8492 
8493  // Apply alignment.
8494  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8495 
8496  // Check previous suballocations for BufferImageGranularity conflicts.
8497  // Make bigger alignment if necessary.
8498  if(bufferImageGranularity > 1)
8499  {
8500  bool bufferImageGranularityConflict = false;
8501  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8502  while(prevSuballocItem != m_Suballocations.cbegin())
8503  {
8504  --prevSuballocItem;
8505  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8506  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8507  {
8508  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8509  {
8510  bufferImageGranularityConflict = true;
8511  break;
8512  }
8513  }
8514  else
8515  // Already on previous page.
8516  break;
8517  }
8518  if(bufferImageGranularityConflict)
8519  {
8520  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8521  }
8522  }
8523 
8524  // Calculate padding at the beginning based on current offset.
8525  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8526 
8527  // Calculate required margin at the end.
8528  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8529 
8530  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8531  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8532  {
8533  return false;
8534  }
8535 
8536  // Check next suballocations for BufferImageGranularity conflicts.
8537  // If conflict exists, allocation cannot be made here.
8538  if(bufferImageGranularity > 1)
8539  {
8540  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8541  ++nextSuballocItem;
8542  while(nextSuballocItem != m_Suballocations.cend())
8543  {
8544  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8545  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8546  {
8547  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8548  {
8549  return false;
8550  }
8551  }
8552  else
8553  {
8554  // Already on next page.
8555  break;
8556  }
8557  ++nextSuballocItem;
8558  }
8559  }
8560  }
8561 
8562  // All tests passed: Success. pOffset is already filled.
8563  return true;
8564 }
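// Worked example for the simple (!canMakeOtherLost) path, assuming
// VMA_DEBUG_MARGIN == 16: for a free suballocation {offset=100, size=200},
// allocSize=64 and allocAlignment=32 give
//   *pOffset = VmaAlignUp(100 + 16, 32) = 128,
//   paddingBegin = 128 - 100 = 28, and 28 + 64 + 16 = 108 <= 200,
// so the request fits (subject to the bufferImageGranularity checks).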
8565 
8566 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8567 {
8568  VMA_ASSERT(item != m_Suballocations.end());
8569  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8570 
8571  VmaSuballocationList::iterator nextItem = item;
8572  ++nextItem;
8573  VMA_ASSERT(nextItem != m_Suballocations.end());
8574  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8575 
8576  item->size += nextItem->size;
8577  --m_FreeCount;
8578  m_Suballocations.erase(nextItem);
8579 }
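// E.g. merging [free 32 @ offset 0][free 16 @ offset 32] yields [free 48 @ offset 0].
// The caller must first unregister the erased item from m_FreeSuballocationsBySize,
// as FreeSuballocation() below does.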
8580 
8581 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8582 {
8583  // Change this suballocation to be marked as free.
8584  VmaSuballocation& suballoc = *suballocItem;
8585  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8586  suballoc.hAllocation = VK_NULL_HANDLE;
8587 
8588  // Update totals.
8589  ++m_FreeCount;
8590  m_SumFreeSize += suballoc.size;
8591 
8592  // Merge with previous and/or next suballocation if it's also free.
8593  bool mergeWithNext = false;
8594  bool mergeWithPrev = false;
8595 
8596  VmaSuballocationList::iterator nextItem = suballocItem;
8597  ++nextItem;
8598  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8599  {
8600  mergeWithNext = true;
8601  }
8602 
8603  VmaSuballocationList::iterator prevItem = suballocItem;
8604  if(suballocItem != m_Suballocations.begin())
8605  {
8606  --prevItem;
8607  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8608  {
8609  mergeWithPrev = true;
8610  }
8611  }
8612 
8613  if(mergeWithNext)
8614  {
8615  UnregisterFreeSuballocation(nextItem);
8616  MergeFreeWithNext(suballocItem);
8617  }
8618 
8619  if(mergeWithPrev)
8620  {
8621  UnregisterFreeSuballocation(prevItem);
8622  MergeFreeWithNext(prevItem);
8623  RegisterFreeSuballocation(prevItem);
8624  return prevItem;
8625  }
8626  else
8627  {
8628  RegisterFreeSuballocation(suballocItem);
8629  return suballocItem;
8630  }
8631 }
8632 
8633 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8634 {
8635  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8636  VMA_ASSERT(item->size > 0);
8637 
8638  // You may want to enable this validation at the beginning or at the end of
8639  // this function, depending on what you want to check.
8640  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8641 
8642  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8643  {
8644  if(m_FreeSuballocationsBySize.empty())
8645  {
8646  m_FreeSuballocationsBySize.push_back(item);
8647  }
8648  else
8649  {
8650  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8651  }
8652  }
8653 
8654  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8655 }
8656 
8657 
8658 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8659 {
8660  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8661  VMA_ASSERT(item->size > 0);
8662 
8663  // You may want to enable this validation at the beginning or at the end of
8664  // this function, depending on what you want to check.
8665  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8666 
8667  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8668  {
8669  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8670  m_FreeSuballocationsBySize.data(),
8671  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8672  item,
8673  VmaSuballocationItemSizeLess());
8674  for(size_t index = it - m_FreeSuballocationsBySize.data();
8675  index < m_FreeSuballocationsBySize.size();
8676  ++index)
8677  {
8678  if(m_FreeSuballocationsBySize[index] == item)
8679  {
8680  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8681  return;
8682  }
8683  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8684  }
8685  VMA_ASSERT(0 && "Not found.");
8686  }
8687 
8688  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8689 }
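// Lookup sketch: m_FreeSuballocationsBySize is sorted by size, so the binary
// search above lands on the first entry whose size is not less than item->size;
// the loop then walks the run of equal-sized entries to find the exact iterator.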
8690 
8691 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8692  VkDeviceSize bufferImageGranularity,
8693  VmaSuballocationType& inOutPrevSuballocType) const
8694 {
8695  if(bufferImageGranularity == 1 || IsEmpty())
8696  {
8697  return false;
8698  }
8699 
8700  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8701  bool typeConflictFound = false;
8702  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8703  it != m_Suballocations.cend();
8704  ++it)
8705  {
8706  const VmaSuballocationType suballocType = it->type;
8707  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8708  {
8709  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8710  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8711  {
8712  typeConflictFound = true;
8713  }
8714  inOutPrevSuballocType = suballocType;
8715  }
8716  }
8717 
8718  return typeConflictFound || minAlignment >= bufferImageGranularity;
8719 }
8720 
8722 // class VmaBlockMetadata_Linear
8723 
8724 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8725  VmaBlockMetadata(hAllocator),
8726  m_SumFreeSize(0),
8727  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8728  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8729  m_1stVectorIndex(0),
8730  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8731  m_1stNullItemsBeginCount(0),
8732  m_1stNullItemsMiddleCount(0),
8733  m_2ndNullItemsCount(0)
8734 {
8735 }
8736 
8737 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8738 {
8739 }
8740 
8741 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8742 {
8743  VmaBlockMetadata::Init(size);
8744  m_SumFreeSize = size;
8745 }
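// Model sketch: VmaBlockMetadata_Linear keeps two suballocation vectors.
// 1st grows toward higher offsets; 2nd is either empty, a ring buffer
// (allocations wrapped around below the beginning of 1st), or a double stack
// (allocations placed from the top of the block downward):
//
//   SECOND_VECTOR_EMPTY:        |xxxxx 1st xxxxx>...free...............|
//   SECOND_VECTOR_RING_BUFFER:  |xx 2nd xx>..free..|xxxxx 1st xxxxx>...|
//   SECOND_VECTOR_DOUBLE_STACK: |xxxxx 1st xxxxx>...free...<xx 2nd xxxx|
//
// Freed items become null entries counted in m_1stNullItemsBeginCount,
// m_1stNullItemsMiddleCount and m_2ndNullItemsCount; AccessSuballocations1st/2nd
// select between m_Suballocations0/1 via m_1stVectorIndex, so the two vectors
// can swap roles during cleanup.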
8746 
8747 bool VmaBlockMetadata_Linear::Validate() const
8748 {
8749  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8750  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8751 
8752  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8753  VMA_VALIDATE(!suballocations1st.empty() ||
8754  suballocations2nd.empty() ||
8755  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8756 
8757  if(!suballocations1st.empty())
8758  {
8759  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8760  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8761  // A null item at the end should have been removed with pop_back().
8762  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8763  }
8764  if(!suballocations2nd.empty())
8765  {
8766  // A null item at the end should have been removed with pop_back().
8767  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8768  }
8769 
8770  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8771  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8772 
8773  VkDeviceSize sumUsedSize = 0;
8774  const size_t suballoc1stCount = suballocations1st.size();
8775  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8776 
8777  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8778  {
8779  const size_t suballoc2ndCount = suballocations2nd.size();
8780  size_t nullItem2ndCount = 0;
8781  for(size_t i = 0; i < suballoc2ndCount; ++i)
8782  {
8783  const VmaSuballocation& suballoc = suballocations2nd[i];
8784  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8785 
8786  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8787  VMA_VALIDATE(suballoc.offset >= offset);
8788 
8789  if(!currFree)
8790  {
8791  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8792  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8793  sumUsedSize += suballoc.size;
8794  }
8795  else
8796  {
8797  ++nullItem2ndCount;
8798  }
8799 
8800  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8801  }
8802 
8803  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8804  }
8805 
8806  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8807  {
8808  const VmaSuballocation& suballoc = suballocations1st[i];
8809  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8810  suballoc.hAllocation == VK_NULL_HANDLE);
8811  }
8812 
8813  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8814 
8815  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8816  {
8817  const VmaSuballocation& suballoc = suballocations1st[i];
8818  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8819 
8820  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8821  VMA_VALIDATE(suballoc.offset >= offset);
8822  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8823 
8824  if(!currFree)
8825  {
8826  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8827  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8828  sumUsedSize += suballoc.size;
8829  }
8830  else
8831  {
8832  ++nullItem1stCount;
8833  }
8834 
8835  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8836  }
8837  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8838 
8839  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8840  {
8841  const size_t suballoc2ndCount = suballocations2nd.size();
8842  size_t nullItem2ndCount = 0;
8843  for(size_t i = suballoc2ndCount; i--; )
8844  {
8845  const VmaSuballocation& suballoc = suballocations2nd[i];
8846  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8847 
8848  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8849  VMA_VALIDATE(suballoc.offset >= offset);
8850 
8851  if(!currFree)
8852  {
8853  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8854  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8855  sumUsedSize += suballoc.size;
8856  }
8857  else
8858  {
8859  ++nullItem2ndCount;
8860  }
8861 
8862  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8863  }
8864 
8865  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8866  }
8867 
8868  VMA_VALIDATE(offset <= GetSize());
8869  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8870 
8871  return true;
8872 }
8873 
8874 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8875 {
8876  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8877  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8878 }
8879 
8880 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8881 {
8882  const VkDeviceSize size = GetSize();
8883 
8884  /*
8885  We don't consider gaps inside the allocation vectors left by freed allocations,
8886  because they are not suitable for reuse in a linear allocator. We consider only
8887  the space that is available for new allocations.
8888  */
8889  if(IsEmpty())
8890  {
8891  return size;
8892  }
8893 
8894  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8895 
8896  switch(m_2ndVectorMode)
8897  {
8898  case SECOND_VECTOR_EMPTY:
8899  /*
8900  Available space is after end of 1st, as well as before beginning of 1st (which
8901  would make it a ring buffer).
8902  */
8903  {
8904  const size_t suballocations1stCount = suballocations1st.size();
8905  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8906  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8907  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8908  return VMA_MAX(
8909  firstSuballoc.offset,
8910  size - (lastSuballoc.offset + lastSuballoc.size));
8911  }
8912  break;
8913 
8914  case SECOND_VECTOR_RING_BUFFER:
8915  /*
8916  Available space is only between end of 2nd and beginning of 1st.
8917  */
8918  {
8919  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8920  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8921  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8922  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8923  }
8924  break;
8925 
8926  case SECOND_VECTOR_DOUBLE_STACK:
8927  /*
8928  Available space is only between end of 1st and top of 2nd.
8929  */
8930  {
8931  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8932  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8933  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8934  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8935  }
8936  break;
8937 
8938  default:
8939  VMA_ASSERT(0);
8940  return 0;
8941  }
8942 }
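// Example (SECOND_VECTOR_DOUBLE_STACK): with size=1024, the last 1st item ending
// at offset 300 and the top of 2nd starting at offset 800, the only space usable
// for new allocations is 800 - 300 = 500, regardless of null items inside the vectors.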
8943 
8944 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8945 {
8946  const VkDeviceSize size = GetSize();
8947  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8948  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8949  const size_t suballoc1stCount = suballocations1st.size();
8950  const size_t suballoc2ndCount = suballocations2nd.size();
8951 
8952  outInfo.blockCount = 1;
8953  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8954  outInfo.unusedRangeCount = 0;
8955  outInfo.usedBytes = 0;
8956  outInfo.allocationSizeMin = UINT64_MAX;
8957  outInfo.allocationSizeMax = 0;
8958  outInfo.unusedRangeSizeMin = UINT64_MAX;
8959  outInfo.unusedRangeSizeMax = 0;
8960 
8961  VkDeviceSize lastOffset = 0;
8962 
8963  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8964  {
8965  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8966  size_t nextAlloc2ndIndex = 0;
8967  while(lastOffset < freeSpace2ndTo1stEnd)
8968  {
8969  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8970  while(nextAlloc2ndIndex < suballoc2ndCount &&
8971  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8972  {
8973  ++nextAlloc2ndIndex;
8974  }
8975 
8976  // Found non-null allocation.
8977  if(nextAlloc2ndIndex < suballoc2ndCount)
8978  {
8979  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8980 
8981  // 1. Process free space before this allocation.
8982  if(lastOffset < suballoc.offset)
8983  {
8984  // There is free space from lastOffset to suballoc.offset.
8985  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8986  ++outInfo.unusedRangeCount;
8987  outInfo.unusedBytes += unusedRangeSize;
8988  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8989  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8990  }
8991 
8992  // 2. Process this allocation.
8993  // There is allocation with suballoc.offset, suballoc.size.
8994  outInfo.usedBytes += suballoc.size;
8995  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8996  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8997 
8998  // 3. Prepare for next iteration.
8999  lastOffset = suballoc.offset + suballoc.size;
9000  ++nextAlloc2ndIndex;
9001  }
9002  // We are at the end.
9003  else
9004  {
9005  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9006  if(lastOffset < freeSpace2ndTo1stEnd)
9007  {
9008  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9009  ++outInfo.unusedRangeCount;
9010  outInfo.unusedBytes += unusedRangeSize;
9011  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9012  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9013  }
9014 
9015  // End of loop.
9016  lastOffset = freeSpace2ndTo1stEnd;
9017  }
9018  }
9019  }
9020 
9021  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9022  const VkDeviceSize freeSpace1stTo2ndEnd =
9023  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9024  while(lastOffset < freeSpace1stTo2ndEnd)
9025  {
9026  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9027  while(nextAlloc1stIndex < suballoc1stCount &&
9028  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9029  {
9030  ++nextAlloc1stIndex;
9031  }
9032 
9033  // Found non-null allocation.
9034  if(nextAlloc1stIndex < suballoc1stCount)
9035  {
9036  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9037 
9038  // 1. Process free space before this allocation.
9039  if(lastOffset < suballoc.offset)
9040  {
9041  // There is free space from lastOffset to suballoc.offset.
9042  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9043  ++outInfo.unusedRangeCount;
9044  outInfo.unusedBytes += unusedRangeSize;
9045  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9046  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9047  }
9048 
9049  // 2. Process this allocation.
9050  // There is allocation with suballoc.offset, suballoc.size.
9051  outInfo.usedBytes += suballoc.size;
9052  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9053  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9054 
9055  // 3. Prepare for next iteration.
9056  lastOffset = suballoc.offset + suballoc.size;
9057  ++nextAlloc1stIndex;
9058  }
9059  // We are at the end.
9060  else
9061  {
9062  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9063  if(lastOffset < freeSpace1stTo2ndEnd)
9064  {
9065  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9066  ++outInfo.unusedRangeCount;
9067  outInfo.unusedBytes += unusedRangeSize;
9068  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9069  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9070  }
9071 
9072  // End of loop.
9073  lastOffset = freeSpace1stTo2ndEnd;
9074  }
9075  }
9076 
9077  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9078  {
9079  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9080  while(lastOffset < size)
9081  {
9082  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9083  while(nextAlloc2ndIndex != SIZE_MAX &&
9084  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9085  {
9086  --nextAlloc2ndIndex;
9087  }
9088 
9089  // Found non-null allocation.
9090  if(nextAlloc2ndIndex != SIZE_MAX)
9091  {
9092  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9093 
9094  // 1. Process free space before this allocation.
9095  if(lastOffset < suballoc.offset)
9096  {
9097  // There is free space from lastOffset to suballoc.offset.
9098  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9099  ++outInfo.unusedRangeCount;
9100  outInfo.unusedBytes += unusedRangeSize;
9101  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9102  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9103  }
9104 
9105  // 2. Process this allocation.
9106  // There is allocation with suballoc.offset, suballoc.size.
9107  outInfo.usedBytes += suballoc.size;
9108  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9109  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9110 
9111  // 3. Prepare for next iteration.
9112  lastOffset = suballoc.offset + suballoc.size;
9113  --nextAlloc2ndIndex;
9114  }
9115  // We are at the end.
9116  else
9117  {
9118  // There is free space from lastOffset to size.
9119  if(lastOffset < size)
9120  {
9121  const VkDeviceSize unusedRangeSize = size - lastOffset;
9122  ++outInfo.unusedRangeCount;
9123  outInfo.unusedBytes += unusedRangeSize;
9124  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9125  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9126  }
9127 
9128  // End of loop.
9129  lastOffset = size;
9130  }
9131  }
9132  }
9133 
9134  outInfo.unusedBytes = size - outInfo.usedBytes;
9135 }
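// Note: the final assignment above overwrites the per-range additions to
// outInfo.unusedBytes with size - usedBytes, which also covers any trailing
// free space; the loops remain necessary for the range counts and min/max stats.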
9136 
9137 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9138 {
9139  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9140  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9141  const VkDeviceSize size = GetSize();
9142  const size_t suballoc1stCount = suballocations1st.size();
9143  const size_t suballoc2ndCount = suballocations2nd.size();
9144 
9145  inoutStats.size += size;
9146 
9147  VkDeviceSize lastOffset = 0;
9148 
9149  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9150  {
9151  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9152  size_t nextAlloc2ndIndex = 0; // Index into suballocations2nd, so it must start at 0.
9153  while(lastOffset < freeSpace2ndTo1stEnd)
9154  {
9155  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9156  while(nextAlloc2ndIndex < suballoc2ndCount &&
9157  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9158  {
9159  ++nextAlloc2ndIndex;
9160  }
9161 
9162  // Found non-null allocation.
9163  if(nextAlloc2ndIndex < suballoc2ndCount)
9164  {
9165  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9166 
9167  // 1. Process free space before this allocation.
9168  if(lastOffset < suballoc.offset)
9169  {
9170  // There is free space from lastOffset to suballoc.offset.
9171  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9172  inoutStats.unusedSize += unusedRangeSize;
9173  ++inoutStats.unusedRangeCount;
9174  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9175  }
9176 
9177  // 2. Process this allocation.
9178  // There is allocation with suballoc.offset, suballoc.size.
9179  ++inoutStats.allocationCount;
9180 
9181  // 3. Prepare for next iteration.
9182  lastOffset = suballoc.offset + suballoc.size;
9183  ++nextAlloc2ndIndex;
9184  }
9185  // We are at the end.
9186  else
9187  {
9188  if(lastOffset < freeSpace2ndTo1stEnd)
9189  {
9190  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9191  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9192  inoutStats.unusedSize += unusedRangeSize;
9193  ++inoutStats.unusedRangeCount;
9194  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9195  }
9196 
9197  // End of loop.
9198  lastOffset = freeSpace2ndTo1stEnd;
9199  }
9200  }
9201  }
9202 
9203  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9204  const VkDeviceSize freeSpace1stTo2ndEnd =
9205  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9206  while(lastOffset < freeSpace1stTo2ndEnd)
9207  {
9208  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9209  while(nextAlloc1stIndex < suballoc1stCount &&
9210  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9211  {
9212  ++nextAlloc1stIndex;
9213  }
9214 
9215  // Found non-null allocation.
9216  if(nextAlloc1stIndex < suballoc1stCount)
9217  {
9218  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9219 
9220  // 1. Process free space before this allocation.
9221  if(lastOffset < suballoc.offset)
9222  {
9223  // There is free space from lastOffset to suballoc.offset.
9224  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9225  inoutStats.unusedSize += unusedRangeSize;
9226  ++inoutStats.unusedRangeCount;
9227  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9228  }
9229 
9230  // 2. Process this allocation.
9231  // There is allocation with suballoc.offset, suballoc.size.
9232  ++inoutStats.allocationCount;
9233 
9234  // 3. Prepare for next iteration.
9235  lastOffset = suballoc.offset + suballoc.size;
9236  ++nextAlloc1stIndex;
9237  }
9238  // We are at the end.
9239  else
9240  {
9241  if(lastOffset < freeSpace1stTo2ndEnd)
9242  {
9243  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9244  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9245  inoutStats.unusedSize += unusedRangeSize;
9246  ++inoutStats.unusedRangeCount;
9247  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9248  }
9249 
9250  // End of loop.
9251  lastOffset = freeSpace1stTo2ndEnd;
9252  }
9253  }
9254 
9255  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9256  {
9257  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9258  while(lastOffset < size)
9259  {
9260  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9261  while(nextAlloc2ndIndex != SIZE_MAX &&
9262  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9263  {
9264  --nextAlloc2ndIndex;
9265  }
9266 
9267  // Found non-null allocation.
9268  if(nextAlloc2ndIndex != SIZE_MAX)
9269  {
9270  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9271 
9272  // 1. Process free space before this allocation.
9273  if(lastOffset < suballoc.offset)
9274  {
9275  // There is free space from lastOffset to suballoc.offset.
9276  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9277  inoutStats.unusedSize += unusedRangeSize;
9278  ++inoutStats.unusedRangeCount;
9279  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9280  }
9281 
9282  // 2. Process this allocation.
9283  // There is allocation with suballoc.offset, suballoc.size.
9284  ++inoutStats.allocationCount;
9285 
9286  // 3. Prepare for next iteration.
9287  lastOffset = suballoc.offset + suballoc.size;
9288  --nextAlloc2ndIndex;
9289  }
9290  // We are at the end.
9291  else
9292  {
9293  if(lastOffset < size)
9294  {
9295  // There is free space from lastOffset to size.
9296  const VkDeviceSize unusedRangeSize = size - lastOffset;
9297  inoutStats.unusedSize += unusedRangeSize;
9298  ++inoutStats.unusedRangeCount;
9299  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9300  }
9301 
9302  // End of loop.
9303  lastOffset = size;
9304  }
9305  }
9306  }
9307 }
9308 
9309 #if VMA_STATS_STRING_ENABLED
9310 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9311 {
9312  const VkDeviceSize size = GetSize();
9313  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9314  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9315  const size_t suballoc1stCount = suballocations1st.size();
9316  const size_t suballoc2ndCount = suballocations2nd.size();
9317 
9318  // FIRST PASS
9319 
9320  size_t unusedRangeCount = 0;
9321  VkDeviceSize usedBytes = 0;
9322 
9323  VkDeviceSize lastOffset = 0;
9324 
9325  size_t alloc2ndCount = 0;
9326  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9327  {
9328  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9329  size_t nextAlloc2ndIndex = 0;
9330  while(lastOffset < freeSpace2ndTo1stEnd)
9331  {
9332  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9333  while(nextAlloc2ndIndex < suballoc2ndCount &&
9334  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9335  {
9336  ++nextAlloc2ndIndex;
9337  }
9338 
9339  // Found non-null allocation.
9340  if(nextAlloc2ndIndex < suballoc2ndCount)
9341  {
9342  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9343 
9344  // 1. Process free space before this allocation.
9345  if(lastOffset < suballoc.offset)
9346  {
9347  // There is free space from lastOffset to suballoc.offset.
9348  ++unusedRangeCount;
9349  }
9350 
9351  // 2. Process this allocation.
9352  // There is allocation with suballoc.offset, suballoc.size.
9353  ++alloc2ndCount;
9354  usedBytes += suballoc.size;
9355 
9356  // 3. Prepare for next iteration.
9357  lastOffset = suballoc.offset + suballoc.size;
9358  ++nextAlloc2ndIndex;
9359  }
9360  // We are at the end.
9361  else
9362  {
9363  if(lastOffset < freeSpace2ndTo1stEnd)
9364  {
9365  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9366  ++unusedRangeCount;
9367  }
9368 
9369  // End of loop.
9370  lastOffset = freeSpace2ndTo1stEnd;
9371  }
9372  }
9373  }
9374 
9375  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9376  size_t alloc1stCount = 0;
9377  const VkDeviceSize freeSpace1stTo2ndEnd =
9378  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9379  while(lastOffset < freeSpace1stTo2ndEnd)
9380  {
9381  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9382  while(nextAlloc1stIndex < suballoc1stCount &&
9383  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9384  {
9385  ++nextAlloc1stIndex;
9386  }
9387 
9388  // Found non-null allocation.
9389  if(nextAlloc1stIndex < suballoc1stCount)
9390  {
9391  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9392 
9393  // 1. Process free space before this allocation.
9394  if(lastOffset < suballoc.offset)
9395  {
9396  // There is free space from lastOffset to suballoc.offset.
9397  ++unusedRangeCount;
9398  }
9399 
9400  // 2. Process this allocation.
9401  // There is allocation with suballoc.offset, suballoc.size.
9402  ++alloc1stCount;
9403  usedBytes += suballoc.size;
9404 
9405  // 3. Prepare for next iteration.
9406  lastOffset = suballoc.offset + suballoc.size;
9407  ++nextAlloc1stIndex;
9408  }
9409  // We are at the end.
9410  else
9411  {
9412  if(lastOffset < freeSpace1stTo2ndEnd)
9413  {
9414  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9415  ++unusedRangeCount;
9416  }
9417 
9418  // End of loop.
9419  lastOffset = freeSpace1stTo2ndEnd;
9420  }
9421  }
9422 
9423  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9424  {
9425  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9426  while(lastOffset < size)
9427  {
9428  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9429  while(nextAlloc2ndIndex != SIZE_MAX &&
9430  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9431  {
9432  --nextAlloc2ndIndex;
9433  }
9434 
9435  // Found non-null allocation.
9436  if(nextAlloc2ndIndex != SIZE_MAX)
9437  {
9438  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9439 
9440  // 1. Process free space before this allocation.
9441  if(lastOffset < suballoc.offset)
9442  {
9443  // There is free space from lastOffset to suballoc.offset.
9444  ++unusedRangeCount;
9445  }
9446 
9447  // 2. Process this allocation.
9448  // There is allocation with suballoc.offset, suballoc.size.
9449  ++alloc2ndCount;
9450  usedBytes += suballoc.size;
9451 
9452  // 3. Prepare for next iteration.
9453  lastOffset = suballoc.offset + suballoc.size;
9454  --nextAlloc2ndIndex;
9455  }
9456  // We are at the end.
9457  else
9458  {
9459  if(lastOffset < size)
9460  {
9461  // There is free space from lastOffset to size.
9462  ++unusedRangeCount;
9463  }
9464 
9465  // End of loop.
9466  lastOffset = size;
9467  }
9468  }
9469  }
9470 
9471  const VkDeviceSize unusedBytes = size - usedBytes;
9472  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9473 
9474  // SECOND PASS
9475  lastOffset = 0;
9476 
9477  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9478  {
9479  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9480  size_t nextAlloc2ndIndex = 0;
9481  while(lastOffset < freeSpace2ndTo1stEnd)
9482  {
9483  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9484  while(nextAlloc2ndIndex < suballoc2ndCount &&
9485  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9486  {
9487  ++nextAlloc2ndIndex;
9488  }
9489 
9490  // Found non-null allocation.
9491  if(nextAlloc2ndIndex < suballoc2ndCount)
9492  {
9493  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9494 
9495  // 1. Process free space before this allocation.
9496  if(lastOffset < suballoc.offset)
9497  {
9498  // There is free space from lastOffset to suballoc.offset.
9499  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9500  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9501  }
9502 
9503  // 2. Process this allocation.
9504  // There is allocation with suballoc.offset, suballoc.size.
9505  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9506 
9507  // 3. Prepare for next iteration.
9508  lastOffset = suballoc.offset + suballoc.size;
9509  ++nextAlloc2ndIndex;
9510  }
9511  // We are at the end.
9512  else
9513  {
9514  if(lastOffset < freeSpace2ndTo1stEnd)
9515  {
9516  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9517  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9518  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9519  }
9520 
9521  // End of loop.
9522  lastOffset = freeSpace2ndTo1stEnd;
9523  }
9524  }
9525  }
9526 
9527  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9528  while(lastOffset < freeSpace1stTo2ndEnd)
9529  {
9530  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9531  while(nextAlloc1stIndex < suballoc1stCount &&
9532  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9533  {
9534  ++nextAlloc1stIndex;
9535  }
9536 
9537  // Found non-null allocation.
9538  if(nextAlloc1stIndex < suballoc1stCount)
9539  {
9540  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9541 
9542  // 1. Process free space before this allocation.
9543  if(lastOffset < suballoc.offset)
9544  {
9545  // There is free space from lastOffset to suballoc.offset.
9546  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9547  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9548  }
9549 
9550  // 2. Process this allocation.
9551  // There is allocation with suballoc.offset, suballoc.size.
9552  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9553 
9554  // 3. Prepare for next iteration.
9555  lastOffset = suballoc.offset + suballoc.size;
9556  ++nextAlloc1stIndex;
9557  }
9558  // We are at the end.
9559  else
9560  {
9561  if(lastOffset < freeSpace1stTo2ndEnd)
9562  {
9563  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9564  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9565  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9566  }
9567 
9568  // End of loop.
9569  lastOffset = freeSpace1stTo2ndEnd;
9570  }
9571  }
9572 
9573  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9574  {
9575  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9576  while(lastOffset < size)
9577  {
9578  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9579  while(nextAlloc2ndIndex != SIZE_MAX &&
9580  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9581  {
9582  --nextAlloc2ndIndex;
9583  }
9584 
9585  // Found non-null allocation.
9586  if(nextAlloc2ndIndex != SIZE_MAX)
9587  {
9588  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9589 
9590  // 1. Process free space before this allocation.
9591  if(lastOffset < suballoc.offset)
9592  {
9593  // There is free space from lastOffset to suballoc.offset.
9594  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9595  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9596  }
9597 
9598  // 2. Process this allocation.
9599  // There is allocation with suballoc.offset, suballoc.size.
9600  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9601 
9602  // 3. Prepare for next iteration.
9603  lastOffset = suballoc.offset + suballoc.size;
9604  --nextAlloc2ndIndex;
9605  }
9606  // We are at the end.
9607  else
9608  {
9609  if(lastOffset < size)
9610  {
9611  // There is free space from lastOffset to size.
9612  const VkDeviceSize unusedRangeSize = size - lastOffset;
9613  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9614  }
9615 
9616  // End of loop.
9617  lastOffset = size;
9618  }
9619  }
9620  }
9621 
9622  PrintDetailedMap_End(json);
9623 }
9624 #endif // #if VMA_STATS_STRING_ENABLED
9625 
9626 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9627  uint32_t currentFrameIndex,
9628  uint32_t frameInUseCount,
9629  VkDeviceSize bufferImageGranularity,
9630  VkDeviceSize allocSize,
9631  VkDeviceSize allocAlignment,
9632  bool upperAddress,
9633  VmaSuballocationType allocType,
9634  bool canMakeOtherLost,
9635  uint32_t strategy,
9636  VmaAllocationRequest* pAllocationRequest)
9637 {
9638  VMA_ASSERT(allocSize > 0);
9639  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9640  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9641  VMA_HEAVY_ASSERT(Validate());
9642 
9643  const VkDeviceSize size = GetSize();
9644  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9645  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9646 
9647  if(upperAddress)
9648  {
9649  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9650  {
9651  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9652  return false;
9653  }
9654 
9655  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9656  if(allocSize > size)
9657  {
9658  return false;
9659  }
9660  VkDeviceSize resultBaseOffset = size - allocSize;
9661  if(!suballocations2nd.empty())
9662  {
9663  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9664  resultBaseOffset = lastSuballoc.offset - allocSize;
9665  if(allocSize > lastSuballoc.offset)
9666  {
9667  return false;
9668  }
9669  }
9670 
9671  // Start from offset equal to end of free space.
9672  VkDeviceSize resultOffset = resultBaseOffset;
9673 
9674  // Apply VMA_DEBUG_MARGIN at the end.
9675  if(VMA_DEBUG_MARGIN > 0)
9676  {
9677  if(resultOffset < VMA_DEBUG_MARGIN)
9678  {
9679  return false;
9680  }
9681  resultOffset -= VMA_DEBUG_MARGIN;
9682  }
9683 
9684  // Apply alignment.
9685  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9686 
9687  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9688  // Make bigger alignment if necessary.
9689  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9690  {
9691  bool bufferImageGranularityConflict = false;
9692  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9693  {
9694  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9695  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9696  {
9697  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9698  {
9699  bufferImageGranularityConflict = true;
9700  break;
9701  }
9702  }
9703  else
9704  // Already on previous page.
9705  break;
9706  }
9707  if(bufferImageGranularityConflict)
9708  {
9709  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9710  }
9711  }
9712 
9713  // There is enough free space.
9714  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9715  suballocations1st.back().offset + suballocations1st.back().size :
9716  0;
9717  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9718  {
9719  // Check previous suballocations for BufferImageGranularity conflicts.
9720  // If conflict exists, allocation cannot be made here.
9721  if(bufferImageGranularity > 1)
9722  {
9723  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9724  {
9725  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9726  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9727  {
9728  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9729  {
9730  return false;
9731  }
9732  }
9733  else
9734  {
9735  // Already on next page.
9736  break;
9737  }
9738  }
9739  }
9740 
9741  // All tests passed: Success.
9742  pAllocationRequest->offset = resultOffset;
9743  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9744  pAllocationRequest->sumItemSize = 0;
9745  // pAllocationRequest->item unused.
9746  pAllocationRequest->itemsToMakeLostCount = 0;
9747  return true;
9748  }
9749  }
9750  else // !upperAddress
9751  {
9752  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9753  {
9754  // Try to allocate at the end of 1st vector.
9755 
9756  VkDeviceSize resultBaseOffset = 0;
9757  if(!suballocations1st.empty())
9758  {
9759  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9760  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9761  }
9762 
9763  // Start from offset equal to beginning of free space.
9764  VkDeviceSize resultOffset = resultBaseOffset;
9765 
9766  // Apply VMA_DEBUG_MARGIN at the beginning.
9767  if(VMA_DEBUG_MARGIN > 0)
9768  {
9769  resultOffset += VMA_DEBUG_MARGIN;
9770  }
9771 
9772  // Apply alignment.
9773  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9774 
9775  // Check previous suballocations for BufferImageGranularity conflicts.
9776  // Make bigger alignment if necessary.
9777  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9778  {
9779  bool bufferImageGranularityConflict = false;
9780  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9781  {
9782  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9783  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9784  {
9785  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9786  {
9787  bufferImageGranularityConflict = true;
9788  break;
9789  }
9790  }
9791  else
9792  // Already on previous page.
9793  break;
9794  }
9795  if(bufferImageGranularityConflict)
9796  {
9797  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9798  }
9799  }
9800 
9801  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9802  suballocations2nd.back().offset : size;
9803 
9804  // There is enough free space at the end after alignment.
9805  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9806  {
9807  // Check next suballocations for BufferImageGranularity conflicts.
9808  // If conflict exists, allocation cannot be made here.
9809  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9810  {
9811  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9812  {
9813  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9814  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9815  {
9816  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9817  {
9818  return false;
9819  }
9820  }
9821  else
9822  {
9823  // Already on previous page.
9824  break;
9825  }
9826  }
9827  }
9828 
9829  // All tests passed: Success.
9830  pAllocationRequest->offset = resultOffset;
9831  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9832  pAllocationRequest->sumItemSize = 0;
9833  // pAllocationRequest->item unused.
9834  pAllocationRequest->itemsToMakeLostCount = 0;
9835  return true;
9836  }
9837  }
9838 
9839  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9840  // beginning of 1st vector as the end of free space.
9841  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9842  {
9843  VMA_ASSERT(!suballocations1st.empty());
9844 
9845  VkDeviceSize resultBaseOffset = 0;
9846  if(!suballocations2nd.empty())
9847  {
9848  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9849  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9850  }
9851 
9852  // Start from offset equal to beginning of free space.
9853  VkDeviceSize resultOffset = resultBaseOffset;
9854 
9855  // Apply VMA_DEBUG_MARGIN at the beginning.
9856  if(VMA_DEBUG_MARGIN > 0)
9857  {
9858  resultOffset += VMA_DEBUG_MARGIN;
9859  }
9860 
9861  // Apply alignment.
9862  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9863 
9864  // Check previous suballocations for BufferImageGranularity conflicts.
9865  // Make bigger alignment if necessary.
9866  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9867  {
9868  bool bufferImageGranularityConflict = false;
9869  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9870  {
9871  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9872  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9873  {
9874  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9875  {
9876  bufferImageGranularityConflict = true;
9877  break;
9878  }
9879  }
9880  else
9881  // Already on previous page.
9882  break;
9883  }
9884  if(bufferImageGranularityConflict)
9885  {
9886  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9887  }
9888  }
9889 
9890  pAllocationRequest->itemsToMakeLostCount = 0;
9891  pAllocationRequest->sumItemSize = 0;
9892  size_t index1st = m_1stNullItemsBeginCount;
9893 
9894  if(canMakeOtherLost)
9895  {
9896  while(index1st < suballocations1st.size() &&
9897  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9898  {
9899  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9900  const VmaSuballocation& suballoc = suballocations1st[index1st];
9901  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9902  {
9903  // No problem.
9904  }
9905  else
9906  {
9907  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9908  if(suballoc.hAllocation->CanBecomeLost() &&
9909  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9910  {
9911  ++pAllocationRequest->itemsToMakeLostCount;
9912  pAllocationRequest->sumItemSize += suballoc.size;
9913  }
9914  else
9915  {
9916  return false;
9917  }
9918  }
9919  ++index1st;
9920  }
9921 
9922  // Check next suballocations for BufferImageGranularity conflicts.
9923  // If conflict exists, we must mark more allocations lost or fail.
9924  if(bufferImageGranularity > 1)
9925  {
9926  while(index1st < suballocations1st.size())
9927  {
9928  const VmaSuballocation& suballoc = suballocations1st[index1st];
9929  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9930  {
9931  if(suballoc.hAllocation != VK_NULL_HANDLE)
9932  {
9933  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9934  if(suballoc.hAllocation->CanBecomeLost() &&
9935  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9936  {
9937  ++pAllocationRequest->itemsToMakeLostCount;
9938  pAllocationRequest->sumItemSize += suballoc.size;
9939  }
9940  else
9941  {
9942  return false;
9943  }
9944  }
9945  }
9946  else
9947  {
9948  // Already on next page.
9949  break;
9950  }
9951  ++index1st;
9952  }
9953  }
9954  }
9955 
9956  // There is enough free space at the end after alignment.
9957  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9958  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9959  {
9960  // Check next suballocations for BufferImageGranularity conflicts.
9961  // If conflict exists, allocation cannot be made here.
9962  if(bufferImageGranularity > 1)
9963  {
9964  for(size_t nextSuballocIndex = index1st;
9965  nextSuballocIndex < suballocations1st.size();
9966  nextSuballocIndex++)
9967  {
9968  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9969  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9970  {
9971  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9972  {
9973  return false;
9974  }
9975  }
9976  else
9977  {
9978  // Already on next page.
9979  break;
9980  }
9981  }
9982  }
9983 
9984  // All tests passed: Success.
9985  pAllocationRequest->offset = resultOffset;
9986  pAllocationRequest->sumFreeSize =
9987  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9988  - resultBaseOffset
9989  - pAllocationRequest->sumItemSize;
9990  // pAllocationRequest->item unused.
9991  return true;
9992  }
9993  }
9994  }
9995 
9996  return false;
9997 }
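// Usage sketch (public API; 'allocator', 'memTypeIndex' and 'bufCreateInfo' are
// assumed to exist). The upperAddress branch above services allocations made with
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT in a pool created with
// VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//   poolCreateInfo.blockSize = 64ull * 1024 * 1024;
//   poolCreateInfo.maxBlockCount = 1;
//   VmaPool pool;
//   vmaCreatePool(allocator, &poolCreateInfo, &pool);
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.pool = pool;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // allocate from the top (double stack)
//   VkBuffer buf;
//   VmaAllocation alloc;
//   vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);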
9998 
9999 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10000  uint32_t currentFrameIndex,
10001  uint32_t frameInUseCount,
10002  VmaAllocationRequest* pAllocationRequest)
10003 {
10004  if(pAllocationRequest->itemsToMakeLostCount == 0)
10005  {
10006  return true;
10007  }
10008 
10009  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10010 
10011  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10012  size_t index1st = m_1stNullItemsBeginCount;
10013  size_t madeLostCount = 0;
10014  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10015  {
10016  VMA_ASSERT(index1st < suballocations1st.size());
10017  VmaSuballocation& suballoc = suballocations1st[index1st];
10018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10019  {
10020  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10021  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10022  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10023  {
10024  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10025  suballoc.hAllocation = VK_NULL_HANDLE;
10026  m_SumFreeSize += suballoc.size;
10027  ++m_1stNullItemsMiddleCount;
10028  ++madeLostCount;
10029  }
10030  else
10031  {
10032  return false;
10033  }
10034  }
10035  ++index1st;
10036  }
10037 
10038  CleanupAfterFree();
10039  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10040 
10041  return true;
10042 }
10043 
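/*
A minimal sketch, with made-up numbers, of the frame-index rule that decides
whether an allocation can be made lost (the same comparison appears in
CreateAllocationRequest() above): an allocation becomes reclaimable once it
has stayed unused for more than frameInUseCount frames.

    uint32_t lastUseFrame = 100, frameInUseCount = 2, currentFrame = 103;
    const bool reclaimable = lastUseFrame + frameInUseCount < currentFrame; // 102 < 103 -> true
*/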
10044 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10045 {
10046  uint32_t lostAllocationCount = 0;
10047 
10048  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10049  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10050  {
10051  VmaSuballocation& suballoc = suballocations1st[i];
10052  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10053  suballoc.hAllocation->CanBecomeLost() &&
10054  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10055  {
10056  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10057  suballoc.hAllocation = VK_NULL_HANDLE;
10058  ++m_1stNullItemsMiddleCount;
10059  m_SumFreeSize += suballoc.size;
10060  ++lostAllocationCount;
10061  }
10062  }
10063 
10064  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10065  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10066  {
10067  VmaSuballocation& suballoc = suballocations2nd[i];
10068  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10069  suballoc.hAllocation->CanBecomeLost() &&
10070  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10071  {
10072  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10073  suballoc.hAllocation = VK_NULL_HANDLE;
10074  ++m_2ndNullItemsCount;
10075  ++lostAllocationCount;
10076  }
10077  }
10078 
10079  if(lostAllocationCount)
10080  {
10081  CleanupAfterFree();
10082  }
10083 
10084  return lostAllocationCount;
10085 }
10086 
10087 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10088 {
10089  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10090  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10091  {
10092  const VmaSuballocation& suballoc = suballocations1st[i];
10093  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10094  {
10095  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10096  {
10097  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10098  return VK_ERROR_VALIDATION_FAILED_EXT;
10099  }
10100  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10101  {
10102  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10103  return VK_ERROR_VALIDATION_FAILED_EXT;
10104  }
10105  }
10106  }
10107 
10108  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10109  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10110  {
10111  const VmaSuballocation& suballoc = suballocations2nd[i];
10112  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10113  {
10114  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10115  {
10116  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10117  return VK_ERROR_VALIDATION_FAILED_EXT;
10118  }
10119  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10120  {
10121  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10122  return VK_ERROR_VALIDATION_FAILED_EXT;
10123  }
10124  }
10125  }
10126 
10127  return VK_SUCCESS;
10128 }
10129 
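/*
A rough sketch of what a probe like VmaValidateMagicValue() amounts to (names
approximate): the VMA_DEBUG_MARGIN bytes around each allocation are filled with
a repeated 32-bit magic pattern, and CheckCorruption() above verifies the
margin before (offset - VMA_DEBUG_MARGIN) and after (offset + size) every live
suballocation.

    const uint32_t* p = (const uint32_t*)((const char*)pBlockData + offset);
    for(size_t i = 0; i < VMA_DEBUG_MARGIN / sizeof(uint32_t); ++i)
    {
        if(p[i] != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false; // A stray write clobbered the margin.
        }
    }
    return true;
*/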
10130 void VmaBlockMetadata_Linear::Alloc(
10131  const VmaAllocationRequest& request,
10132  VmaSuballocationType type,
10133  VkDeviceSize allocSize,
10134  bool upperAddress,
10135  VmaAllocation hAllocation)
10136 {
10137  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10138 
10139  if(upperAddress)
10140  {
10141  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10142  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10143  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10144  suballocations2nd.push_back(newSuballoc);
10145  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10146  }
10147  else
10148  {
10149  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10150 
10151  // First allocation.
10152  if(suballocations1st.empty())
10153  {
10154  suballocations1st.push_back(newSuballoc);
10155  }
10156  else
10157  {
10158  // New allocation at the end of 1st vector.
10159  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10160  {
10161  // Check if it fits before the end of the block.
10162  VMA_ASSERT(request.offset + allocSize <= GetSize());
10163  suballocations1st.push_back(newSuballoc);
10164  }
10165  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10166  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10167  {
10168  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10169 
10170  switch(m_2ndVectorMode)
10171  {
10172  case SECOND_VECTOR_EMPTY:
10173  // First allocation from second part ring buffer.
10174  VMA_ASSERT(suballocations2nd.empty());
10175  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10176  break;
10177  case SECOND_VECTOR_RING_BUFFER:
10178  // 2-part ring buffer is already started.
10179  VMA_ASSERT(!suballocations2nd.empty());
10180  break;
10181  case SECOND_VECTOR_DOUBLE_STACK:
10182  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10183  break;
10184  default:
10185  VMA_ASSERT(0);
10186  }
10187 
10188  suballocations2nd.push_back(newSuballoc);
10189  }
10190  else
10191  {
10192  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10193  }
10194  }
10195  }
10196 
10197  m_SumFreeSize -= newSuballoc.size;
10198 }
10199 
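/*
A rough picture of the three shapes a linear block can take, as distinguished
by m_2ndVectorMode in Alloc() above (letters = allocations, dots = free space):

    SECOND_VECTOR_EMPTY:        |AAAA BBBB CCCC ............|  plain stack, 1st vector only
    SECOND_VECTOR_RING_BUFFER:  |DDDD ...... AAAA BBBB CCCC |  2nd vector wraps in front of 1st
    SECOND_VECTOR_DOUBLE_STACK: |AAAA BBBB ...... FFFF EEEE |  2nd vector grows down from the top
*/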
10200 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10201 {
10202  FreeAtOffset(allocation->GetOffset());
10203 }
10204 
10205 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10206 {
10207  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10208  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10209 
10210  if(!suballocations1st.empty())
10211  {
10212  // First allocation: Mark it as next empty at the beginning.
10213  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10214  if(firstSuballoc.offset == offset)
10215  {
10216  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10217  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10218  m_SumFreeSize += firstSuballoc.size;
10219  ++m_1stNullItemsBeginCount;
10220  CleanupAfterFree();
10221  return;
10222  }
10223  }
10224 
10225  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10226  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10227  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10228  {
10229  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10230  if(lastSuballoc.offset == offset)
10231  {
10232  m_SumFreeSize += lastSuballoc.size;
10233  suballocations2nd.pop_back();
10234  CleanupAfterFree();
10235  return;
10236  }
10237  }
10238  // Last allocation in 1st vector.
10239  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10240  {
10241  VmaSuballocation& lastSuballoc = suballocations1st.back();
10242  if(lastSuballoc.offset == offset)
10243  {
10244  m_SumFreeSize += lastSuballoc.size;
10245  suballocations1st.pop_back();
10246  CleanupAfterFree();
10247  return;
10248  }
10249  }
10250 
10251  // Item from the middle of 1st vector.
10252  {
10253  VmaSuballocation refSuballoc;
10254  refSuballoc.offset = offset;
10255  // The rest of the members stay intentionally uninitialized, for performance.
10256  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10257  suballocations1st.begin() + m_1stNullItemsBeginCount,
10258  suballocations1st.end(),
10259  refSuballoc);
10260  if(it != suballocations1st.end())
10261  {
10262  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10263  it->hAllocation = VK_NULL_HANDLE;
10264  ++m_1stNullItemsMiddleCount;
10265  m_SumFreeSize += it->size;
10266  CleanupAfterFree();
10267  return;
10268  }
10269  }
10270 
10271  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10272  {
10273  // Item from the middle of 2nd vector.
10274  VmaSuballocation refSuballoc;
10275  refSuballoc.offset = offset;
10276  // The rest of the members stay intentionally uninitialized, for performance.
10277  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10278  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10279  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10280  if(it != suballocations2nd.end())
10281  {
10282  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10283  it->hAllocation = VK_NULL_HANDLE;
10284  ++m_2ndNullItemsCount;
10285  m_SumFreeSize += it->size;
10286  CleanupAfterFree();
10287  return;
10288  }
10289  }
10290 
10291  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10292 }
10293 
10294 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10295 {
10296  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10297  const size_t suballocCount = AccessSuballocations1st().size();
10298  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10299 }
10300 
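/*
Worked example of the heuristic above, with illustrative numbers: for 100
suballocations of which 70 are null items,

    nullItemCount * 2 = 140  >=  (suballocCount - nullItemCount) * 3 = 90,

so the 1st vector gets compacted; with only 20 null items, 40 >= 240 fails and
compaction is skipped. In other words, compaction triggers once null items
outnumber live ones by at least 3:2 (and the vector is non-trivially large).
*/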
10301 void VmaBlockMetadata_Linear::CleanupAfterFree()
10302 {
10303  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10304  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10305 
10306  if(IsEmpty())
10307  {
10308  suballocations1st.clear();
10309  suballocations2nd.clear();
10310  m_1stNullItemsBeginCount = 0;
10311  m_1stNullItemsMiddleCount = 0;
10312  m_2ndNullItemsCount = 0;
10313  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10314  }
10315  else
10316  {
10317  const size_t suballoc1stCount = suballocations1st.size();
10318  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10319  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10320 
10321  // Find more null items at the beginning of 1st vector.
10322  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10323  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10324  {
10325  ++m_1stNullItemsBeginCount;
10326  --m_1stNullItemsMiddleCount;
10327  }
10328 
10329  // Find more null items at the end of 1st vector.
10330  while(m_1stNullItemsMiddleCount > 0 &&
10331  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10332  {
10333  --m_1stNullItemsMiddleCount;
10334  suballocations1st.pop_back();
10335  }
10336 
10337  // Find more null items at the end of 2nd vector.
10338  while(m_2ndNullItemsCount > 0 &&
10339  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10340  {
10341  --m_2ndNullItemsCount;
10342  suballocations2nd.pop_back();
10343  }
10344 
10345  if(ShouldCompact1st())
10346  {
10347  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10348  size_t srcIndex = m_1stNullItemsBeginCount;
10349  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10350  {
10351  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10352  {
10353  ++srcIndex;
10354  }
10355  if(dstIndex != srcIndex)
10356  {
10357  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10358  }
10359  ++srcIndex;
10360  }
10361  suballocations1st.resize(nonNullItemCount);
10362  m_1stNullItemsBeginCount = 0;
10363  m_1stNullItemsMiddleCount = 0;
10364  }
10365 
10366  // 2nd vector became empty.
10367  if(suballocations2nd.empty())
10368  {
10369  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10370  }
10371 
10372  // 1st vector became empty.
10373  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10374  {
10375  suballocations1st.clear();
10376  m_1stNullItemsBeginCount = 0;
10377 
10378  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10379  {
10380  // Swap 1st with 2nd. Now 2nd is empty.
10381  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10382  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10383  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10384  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10385  {
10386  ++m_1stNullItemsBeginCount;
10387  --m_1stNullItemsMiddleCount;
10388  }
10389  m_2ndNullItemsCount = 0;
10390  m_1stVectorIndex ^= 1;
10391  }
10392  }
10393  }
10394 
10395  VMA_HEAVY_ASSERT(Validate());
10396 }
10397 
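/*
Net effect of the compaction branch in CleanupAfterFree() above, sketched on
indices (X = live item, . = null item):

    before: [ . . X . X X . ]   m_1stNullItemsBeginCount = 2, middle null count = 2
    after:  [ X X X ]           both null counts reset to 0,

i.e. live items are packed to the front in their original order and the vector
is shrunk to the live count.
*/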
10398 
10399 ////////////////////////////////////////////////////////////////////////////////
10400 // class VmaBlockMetadata_Buddy
10401 
10402 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10403  VmaBlockMetadata(hAllocator),
10404  m_Root(VMA_NULL),
10405  m_AllocationCount(0),
10406  m_FreeCount(1),
10407  m_SumFreeSize(0)
10408 {
10409  memset(m_FreeList, 0, sizeof(m_FreeList));
10410 }
10411 
10412 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10413 {
10414  DeleteNode(m_Root);
10415 }
10416 
10417 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10418 {
10419  VmaBlockMetadata::Init(size);
10420 
10421  m_UsableSize = VmaPrevPow2(size);
10422  m_SumFreeSize = m_UsableSize;
10423 
10424  // Calculate m_LevelCount.
10425  m_LevelCount = 1;
10426  while(m_LevelCount < MAX_LEVELS &&
10427  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10428  {
10429  ++m_LevelCount;
10430  }
10431 
10432  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10433  rootNode->offset = 0;
10434  rootNode->type = Node::TYPE_FREE;
10435  rootNode->parent = VMA_NULL;
10436  rootNode->buddy = VMA_NULL;
10437 
10438  m_Root = rootNode;
10439  AddToFreeListFront(0, rootNode);
10440 }
10441 
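/*
Assuming LevelToNodeSize(level) == m_UsableSize >> level (as used elsewhere in
this class), the loop above gives, e.g. for a 256 MiB block with a
MIN_NODE_SIZE of 32 bytes:

    level 0 -> 256 MiB (root), level 1 -> 128 MiB, ..., level 23 -> 32 B,

so m_LevelCount ends up as 24, unless capped earlier by MAX_LEVELS.
*/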
10442 bool VmaBlockMetadata_Buddy::Validate() const
10443 {
10444  // Validate tree.
10445  ValidationContext ctx;
10446  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10447  {
10448  VMA_VALIDATE(false && "ValidateNode failed.");
10449  }
10450  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10451  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10452 
10453  // Validate free node lists.
10454  for(uint32_t level = 0; level < m_LevelCount; ++level)
10455  {
10456  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10457  m_FreeList[level].front->free.prev == VMA_NULL);
10458 
10459  for(Node* node = m_FreeList[level].front;
10460  node != VMA_NULL;
10461  node = node->free.next)
10462  {
10463  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10464 
10465  if(node->free.next == VMA_NULL)
10466  {
10467  VMA_VALIDATE(m_FreeList[level].back == node);
10468  }
10469  else
10470  {
10471  VMA_VALIDATE(node->free.next->free.prev == node);
10472  }
10473  }
10474  }
10475 
10476  // Validate that free lists at higher levels are empty.
10477  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10478  {
10479  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10480  }
10481 
10482  return true;
10483 }
10484 
10485 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10486 {
10487  for(uint32_t level = 0; level < m_LevelCount; ++level)
10488  {
10489  if(m_FreeList[level].front != VMA_NULL)
10490  {
10491  return LevelToNodeSize(level);
10492  }
10493  }
10494  return 0;
10495 }
10496 
10497 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10498 {
10499  const VkDeviceSize unusableSize = GetUnusableSize();
10500 
10501  outInfo.blockCount = 1;
10502 
10503  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10504  outInfo.usedBytes = outInfo.unusedBytes = 0;
10505 
10506  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10507  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10508  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10509 
10510  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10511 
10512  if(unusableSize > 0)
10513  {
10514  ++outInfo.unusedRangeCount;
10515  outInfo.unusedBytes += unusableSize;
10516  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10517  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10518  }
10519 }
10520 
10521 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10522 {
10523  const VkDeviceSize unusableSize = GetUnusableSize();
10524 
10525  inoutStats.size += GetSize();
10526  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10527  inoutStats.allocationCount += m_AllocationCount;
10528  inoutStats.unusedRangeCount += m_FreeCount;
10529  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10530 
10531  if(unusableSize > 0)
10532  {
10533  ++inoutStats.unusedRangeCount;
10534  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10535  }
10536 }
10537 
10538 #if VMA_STATS_STRING_ENABLED
10539 
10540 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10541 {
10542  // TODO optimize
10543  VmaStatInfo stat;
10544  CalcAllocationStatInfo(stat);
10545 
10546  PrintDetailedMap_Begin(
10547  json,
10548  stat.unusedBytes,
10549  stat.allocationCount,
10550  stat.unusedRangeCount);
10551 
10552  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10553 
10554  const VkDeviceSize unusableSize = GetUnusableSize();
10555  if(unusableSize > 0)
10556  {
10557  PrintDetailedMap_UnusedRange(json,
10558  m_UsableSize, // offset
10559  unusableSize); // size
10560  }
10561 
10562  PrintDetailedMap_End(json);
10563 }
10564 
10565 #endif // #if VMA_STATS_STRING_ENABLED
10566 
10567 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10568  uint32_t currentFrameIndex,
10569  uint32_t frameInUseCount,
10570  VkDeviceSize bufferImageGranularity,
10571  VkDeviceSize allocSize,
10572  VkDeviceSize allocAlignment,
10573  bool upperAddress,
10574  VmaSuballocationType allocType,
10575  bool canMakeOtherLost,
10576  uint32_t strategy,
10577  VmaAllocationRequest* pAllocationRequest)
10578 {
10579  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10580 
10581  // Simple way to respect bufferImageGranularity. May be optimized some day.
10582  // Whenever it might be an OPTIMAL image...
10583  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10584  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10585  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10586  {
10587  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10588  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10589  }
10590 
10591  if(allocSize > m_UsableSize)
10592  {
10593  return false;
10594  }
10595 
10596  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10597  for(uint32_t level = targetLevel + 1; level--; )
10598  {
10599  for(Node* freeNode = m_FreeList[level].front;
10600  freeNode != VMA_NULL;
10601  freeNode = freeNode->free.next)
10602  {
10603  if(freeNode->offset % allocAlignment == 0)
10604  {
10605  pAllocationRequest->offset = freeNode->offset;
10606  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10607  pAllocationRequest->sumItemSize = 0;
10608  pAllocationRequest->itemsToMakeLostCount = 0;
10609  pAllocationRequest->customData = (void*)(uintptr_t)level;
10610  return true;
10611  }
10612  }
10613  }
10614 
10615  return false;
10616 }
10617 
10618 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10619  uint32_t currentFrameIndex,
10620  uint32_t frameInUseCount,
10621  VmaAllocationRequest* pAllocationRequest)
10622 {
10623  /*
10624  Lost allocations are not supported in buddy allocator at the moment.
10625  Support might be added in the future.
10626  */
10627  return pAllocationRequest->itemsToMakeLostCount == 0;
10628 }
10629 
10630 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10631 {
10632  /*
10633  Lost allocations are not supported in buddy allocator at the moment.
10634  Support might be added in the future.
10635  */
10636  return 0;
10637 }
10638 
10639 void VmaBlockMetadata_Buddy::Alloc(
10640  const VmaAllocationRequest& request,
10641  VmaSuballocationType type,
10642  VkDeviceSize allocSize,
10643  bool upperAddress,
10644  VmaAllocation hAllocation)
10645 {
10646  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10647  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10648 
10649  Node* currNode = m_FreeList[currLevel].front;
10650  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10651  while(currNode->offset != request.offset)
10652  {
10653  currNode = currNode->free.next;
10654  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10655  }
10656 
10657  // Go down, splitting free nodes.
10658  while(currLevel < targetLevel)
10659  {
10660  // currNode is already the first free node at currLevel.
10661  // Remove it from the list of free nodes at this level.
10662  RemoveFromFreeList(currLevel, currNode);
10663 
10664  const uint32_t childrenLevel = currLevel + 1;
10665 
10666  // Create two free sub-nodes.
10667  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10668  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10669 
10670  leftChild->offset = currNode->offset;
10671  leftChild->type = Node::TYPE_FREE;
10672  leftChild->parent = currNode;
10673  leftChild->buddy = rightChild;
10674 
10675  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10676  rightChild->type = Node::TYPE_FREE;
10677  rightChild->parent = currNode;
10678  rightChild->buddy = leftChild;
10679 
10680  // Convert current currNode to split type.
10681  currNode->type = Node::TYPE_SPLIT;
10682  currNode->split.leftChild = leftChild;
10683 
10684  // Add child nodes to free list. Order is important!
10685  AddToFreeListFront(childrenLevel, rightChild);
10686  AddToFreeListFront(childrenLevel, leftChild);
10687 
10688  ++m_FreeCount;
10689  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10690  ++currLevel;
10691  currNode = m_FreeList[currLevel].front;
10692 
10693  /*
10694  We can be sure that currNode, as the left child of the node previously split,
10695  also fulfills the alignment requirement.
10696  */
10697  }
10698 
10699  // Remove from free list.
10700  VMA_ASSERT(currLevel == targetLevel &&
10701  currNode != VMA_NULL &&
10702  currNode->type == Node::TYPE_FREE);
10703  RemoveFromFreeList(currLevel, currNode);
10704 
10705  // Convert to allocation node.
10706  currNode->type = Node::TYPE_ALLOCATION;
10707  currNode->allocation.alloc = hAllocation;
10708 
10709  ++m_AllocationCount;
10710  --m_FreeCount;
10711  m_SumFreeSize -= allocSize;
10712 }
10713 
10714 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10715 {
10716  if(node->type == Node::TYPE_SPLIT)
10717  {
10718  DeleteNode(node->split.leftChild->buddy);
10719  DeleteNode(node->split.leftChild);
10720  }
10721 
10722  vma_delete(GetAllocationCallbacks(), node);
10723 }
10724 
10725 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10726 {
10727  VMA_VALIDATE(level < m_LevelCount);
10728  VMA_VALIDATE(curr->parent == parent);
10729  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10730  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10731  switch(curr->type)
10732  {
10733  case Node::TYPE_FREE:
10734  // curr->free.prev, next are validated separately.
10735  ctx.calculatedSumFreeSize += levelNodeSize;
10736  ++ctx.calculatedFreeCount;
10737  break;
10738  case Node::TYPE_ALLOCATION:
10739  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10740  ++ctx.calculatedAllocationCount;
10741  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10742  break;
10743  case Node::TYPE_SPLIT:
10744  {
10745  const uint32_t childrenLevel = level + 1;
10746  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10747  const Node* const leftChild = curr->split.leftChild;
10748  VMA_VALIDATE(leftChild != VMA_NULL);
10749  VMA_VALIDATE(leftChild->offset == curr->offset);
10750  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10751  {
10752  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10753  }
10754  const Node* const rightChild = leftChild->buddy;
10755  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10756  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10757  {
10758  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10759  }
10760  }
10761  break;
10762  default:
10763  return false;
10764  }
10765 
10766  return true;
10767 }
10768 
10769 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10770 {
10771  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10772  uint32_t level = 0;
10773  VkDeviceSize currLevelNodeSize = m_UsableSize;
10774  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10775  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10776  {
10777  ++level;
10778  currLevelNodeSize = nextLevelNodeSize;
10779  nextLevelNodeSize = currLevelNodeSize >> 1;
10780  }
10781  return level;
10782 }
10783 
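/*
Worked example for the loop above, assuming m_UsableSize = 1024 and a
sufficiently large m_LevelCount: for allocSize = 100,

    level 0: nextLevelNodeSize = 512, 100 <= 512 -> level 1
    level 1: nextLevelNodeSize = 256, 100 <= 256 -> level 2
    level 2: nextLevelNodeSize = 128, 100 <= 128 -> level 3
    level 3: nextLevelNodeSize = 64,  100 <= 64 fails -> return 3,

i.e. the deepest level whose node size (here 128) still fits the allocation.
*/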
10784 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10785 {
10786  // Find node and level.
10787  Node* node = m_Root;
10788  VkDeviceSize nodeOffset = 0;
10789  uint32_t level = 0;
10790  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10791  while(node->type == Node::TYPE_SPLIT)
10792  {
10793  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10794  if(offset < nodeOffset + nextLevelSize)
10795  {
10796  node = node->split.leftChild;
10797  }
10798  else
10799  {
10800  node = node->split.leftChild->buddy;
10801  nodeOffset += nextLevelSize;
10802  }
10803  ++level;
10804  levelNodeSize = nextLevelSize;
10805  }
10806 
10807  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10808  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10809 
10810  ++m_FreeCount;
10811  --m_AllocationCount;
10812  m_SumFreeSize += alloc->GetSize();
10813 
10814  node->type = Node::TYPE_FREE;
10815 
10816  // Join free nodes if possible.
10817  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10818  {
10819  RemoveFromFreeList(level, node->buddy);
10820  Node* const parent = node->parent;
10821 
10822  vma_delete(GetAllocationCallbacks(), node->buddy);
10823  vma_delete(GetAllocationCallbacks(), node);
10824  parent->type = Node::TYPE_FREE;
10825 
10826  node = parent;
10827  --level;
10828  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10829  --m_FreeCount;
10830  }
10831 
10832  AddToFreeListFront(level, node);
10833 }
10834 
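/*
A compact way to see the descent above: with power-of-2 node sizes aligned to
their own size, the buddy of a node at a given offset sits at

    VkDeviceSize buddyOffset = nodeOffset ^ levelNodeSize; // flips the bit separating the two halves

This implementation follows explicit parent/buddy pointers instead, but the
relation explains why left vs. right can be decided purely from the offset.
*/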
10835 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10836 {
10837  switch(node->type)
10838  {
10839  case Node::TYPE_FREE:
10840  ++outInfo.unusedRangeCount;
10841  outInfo.unusedBytes += levelNodeSize;
10842  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10843  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10844  break;
10845  case Node::TYPE_ALLOCATION:
10846  {
10847  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10848  ++outInfo.allocationCount;
10849  outInfo.usedBytes += allocSize;
10850  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10851  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10852 
10853  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10854  if(unusedRangeSize > 0)
10855  {
10856  ++outInfo.unusedRangeCount;
10857  outInfo.unusedBytes += unusedRangeSize;
10858  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10859  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10860  }
10861  }
10862  break;
10863  case Node::TYPE_SPLIT:
10864  {
10865  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10866  const Node* const leftChild = node->split.leftChild;
10867  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10868  const Node* const rightChild = leftChild->buddy;
10869  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10870  }
10871  break;
10872  default:
10873  VMA_ASSERT(0);
10874  }
10875 }
10876 
10877 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10878 {
10879  VMA_ASSERT(node->type == Node::TYPE_FREE);
10880 
10881  // List is empty.
10882  Node* const frontNode = m_FreeList[level].front;
10883  if(frontNode == VMA_NULL)
10884  {
10885  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10886  node->free.prev = node->free.next = VMA_NULL;
10887  m_FreeList[level].front = m_FreeList[level].back = node;
10888  }
10889  else
10890  {
10891  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10892  node->free.prev = VMA_NULL;
10893  node->free.next = frontNode;
10894  frontNode->free.prev = node;
10895  m_FreeList[level].front = node;
10896  }
10897 }
10898 
10899 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10900 {
10901  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10902 
10903  // It is at the front.
10904  if(node->free.prev == VMA_NULL)
10905  {
10906  VMA_ASSERT(m_FreeList[level].front == node);
10907  m_FreeList[level].front = node->free.next;
10908  }
10909  else
10910  {
10911  Node* const prevFreeNode = node->free.prev;
10912  VMA_ASSERT(prevFreeNode->free.next == node);
10913  prevFreeNode->free.next = node->free.next;
10914  }
10915 
10916  // It is at the back.
10917  if(node->free.next == VMA_NULL)
10918  {
10919  VMA_ASSERT(m_FreeList[level].back == node);
10920  m_FreeList[level].back = node->free.prev;
10921  }
10922  else
10923  {
10924  Node* const nextFreeNode = node->free.next;
10925  VMA_ASSERT(nextFreeNode->free.prev == node);
10926  nextFreeNode->free.prev = node->free.prev;
10927  }
10928 }
10929 
10930 #if VMA_STATS_STRING_ENABLED
10931 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10932 {
10933  switch(node->type)
10934  {
10935  case Node::TYPE_FREE:
10936  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10937  break;
10938  case Node::TYPE_ALLOCATION:
10939  {
10940  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10941  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10942  if(allocSize < levelNodeSize)
10943  {
10944  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10945  }
10946  }
10947  break;
10948  case Node::TYPE_SPLIT:
10949  {
10950  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10951  const Node* const leftChild = node->split.leftChild;
10952  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10953  const Node* const rightChild = leftChild->buddy;
10954  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10955  }
10956  break;
10957  default:
10958  VMA_ASSERT(0);
10959  }
10960 }
10961 #endif // #if VMA_STATS_STRING_ENABLED
10962 
10963 
10964 ////////////////////////////////////////////////////////////////////////////////
10965 // class VmaDeviceMemoryBlock
10966 
10967 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10968  m_pMetadata(VMA_NULL),
10969  m_MemoryTypeIndex(UINT32_MAX),
10970  m_Id(0),
10971  m_hMemory(VK_NULL_HANDLE),
10972  m_MapCount(0),
10973  m_pMappedData(VMA_NULL)
10974 {
10975 }
10976 
10977 void VmaDeviceMemoryBlock::Init(
10978  VmaAllocator hAllocator,
10979  uint32_t newMemoryTypeIndex,
10980  VkDeviceMemory newMemory,
10981  VkDeviceSize newSize,
10982  uint32_t id,
10983  uint32_t algorithm)
10984 {
10985  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10986 
10987  m_MemoryTypeIndex = newMemoryTypeIndex;
10988  m_Id = id;
10989  m_hMemory = newMemory;
10990 
10991  switch(algorithm)
10992  {
10993  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10994  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10995  break;
10996  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10997  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10998  break;
10999  default:
11000  VMA_ASSERT(0);
11001  // Fall-through.
11002  case 0:
11003  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11004  }
11005  m_pMetadata->Init(newSize);
11006 }
11007 
11008 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11009 {
11010  // This is the most important assert in the entire library.
11011  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11012  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11013 
11014  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11015  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11016  m_hMemory = VK_NULL_HANDLE;
11017 
11018  vma_delete(allocator, m_pMetadata);
11019  m_pMetadata = VMA_NULL;
11020 }
11021 
11022 bool VmaDeviceMemoryBlock::Validate() const
11023 {
11024  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11025  (m_pMetadata->GetSize() != 0));
11026 
11027  return m_pMetadata->Validate();
11028 }
11029 
11030 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11031 {
11032  void* pData = nullptr;
11033  VkResult res = Map(hAllocator, 1, &pData);
11034  if(res != VK_SUCCESS)
11035  {
11036  return res;
11037  }
11038 
11039  res = m_pMetadata->CheckCorruption(pData);
11040 
11041  Unmap(hAllocator, 1);
11042 
11043  return res;
11044 }
11045 
11046 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11047 {
11048  if(count == 0)
11049  {
11050  return VK_SUCCESS;
11051  }
11052 
11053  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11054  if(m_MapCount != 0)
11055  {
11056  m_MapCount += count;
11057  VMA_ASSERT(m_pMappedData != VMA_NULL);
11058  if(ppData != VMA_NULL)
11059  {
11060  *ppData = m_pMappedData;
11061  }
11062  return VK_SUCCESS;
11063  }
11064  else
11065  {
11066  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11067  hAllocator->m_hDevice,
11068  m_hMemory,
11069  0, // offset
11070  VK_WHOLE_SIZE,
11071  0, // flags
11072  &m_pMappedData);
11073  if(result == VK_SUCCESS)
11074  {
11075  if(ppData != VMA_NULL)
11076  {
11077  *ppData = m_pMappedData;
11078  }
11079  m_MapCount = count;
11080  }
11081  return result;
11082  }
11083 }
11084 
11085 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11086 {
11087  if(count == 0)
11088  {
11089  return;
11090  }
11091 
11092  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11093  if(m_MapCount >= count)
11094  {
11095  m_MapCount -= count;
11096  if(m_MapCount == 0)
11097  {
11098  m_pMappedData = VMA_NULL;
11099  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11100  }
11101  }
11102  else
11103  {
11104  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11105  }
11106 }
11107 
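/*
Map()/Unmap() above are reference-counted per block: only the first Map() calls
vkMapMemory and only the matching last Unmap() calls vkUnmapMemory. A usage
sketch (hypothetical caller):

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    block.Map(hAllocator, 1, &p1);  // maps the memory, m_MapCount = 1
    block.Map(hAllocator, 1, &p2);  // reuses the mapping, p2 == p1, m_MapCount = 2
    block.Unmap(hAllocator, 1);     // m_MapCount = 1, memory stays mapped
    block.Unmap(hAllocator, 1);     // m_MapCount = 0, vkUnmapMemory is called
*/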
11108 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11109 {
11110  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11111  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11112 
11113  void* pData;
11114  VkResult res = Map(hAllocator, 1, &pData);
11115  if(res != VK_SUCCESS)
11116  {
11117  return res;
11118  }
11119 
11120  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11121  VmaWriteMagicValue(pData, allocOffset + allocSize);
11122 
11123  Unmap(hAllocator, 1);
11124 
11125  return VK_SUCCESS;
11126 }
11127 
11128 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11129 {
11130  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11131  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11132 
11133  void* pData;
11134  VkResult res = Map(hAllocator, 1, &pData);
11135  if(res != VK_SUCCESS)
11136  {
11137  return res;
11138  }
11139 
11140  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11141  {
11142  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11143  }
11144  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11145  {
11146  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11147  }
11148 
11149  Unmap(hAllocator, 1);
11150 
11151  return VK_SUCCESS;
11152 }
11153 
11154 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11155  const VmaAllocator hAllocator,
11156  const VmaAllocation hAllocation,
11157  VkBuffer hBuffer)
11158 {
11159  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11160  hAllocation->GetBlock() == this);
11161  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11162  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11163  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11164  hAllocator->m_hDevice,
11165  hBuffer,
11166  m_hMemory,
11167  hAllocation->GetOffset());
11168 }
11169 
11170 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11171  const VmaAllocator hAllocator,
11172  const VmaAllocation hAllocation,
11173  VkImage hImage)
11174 {
11175  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11176  hAllocation->GetBlock() == this);
11177  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11178  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11179  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11180  hAllocator->m_hDevice,
11181  hImage,
11182  m_hMemory,
11183  hAllocation->GetOffset());
11184 }
11185 
11186 static void InitStatInfo(VmaStatInfo& outInfo)
11187 {
11188  memset(&outInfo, 0, sizeof(outInfo));
11189  outInfo.allocationSizeMin = UINT64_MAX;
11190  outInfo.unusedRangeSizeMin = UINT64_MAX;
11191 }
11192 
11193 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11194 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11195 {
11196  inoutInfo.blockCount += srcInfo.blockCount;
11197  inoutInfo.allocationCount += srcInfo.allocationCount;
11198  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11199  inoutInfo.usedBytes += srcInfo.usedBytes;
11200  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11201  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11202  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11203  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11204  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11205 }
11206 
11207 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11208 {
11209  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11210  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11211  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11212  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11213 }
11214 
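/*
VmaRoundDiv used above is rounded rather than truncating integer division,
i.e. (x + y/2) / y. For example, averaging 10 allocations totaling 1015 bytes:

    (1015 + 10/2) / 10 = 102,   whereas plain integer division gives 101.
*/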
11215 VmaPool_T::VmaPool_T(
11216  VmaAllocator hAllocator,
11217  const VmaPoolCreateInfo& createInfo,
11218  VkDeviceSize preferredBlockSize) :
11219  m_BlockVector(
11220  hAllocator,
11221  createInfo.memoryTypeIndex,
11222  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11223  createInfo.minBlockCount,
11224  createInfo.maxBlockCount,
11225  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11226  createInfo.frameInUseCount,
11227  true, // isCustomPool
11228  createInfo.blockSize != 0, // explicitBlockSize
11229  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11230  m_Id(0)
11231 {
11232 }
11233 
11234 VmaPool_T::~VmaPool_T()
11235 {
11236 }
11237 
11238 #if VMA_STATS_STRING_ENABLED
11239 
11240 #endif // #if VMA_STATS_STRING_ENABLED
11241 
11242 VmaBlockVector::VmaBlockVector(
11243  VmaAllocator hAllocator,
11244  uint32_t memoryTypeIndex,
11245  VkDeviceSize preferredBlockSize,
11246  size_t minBlockCount,
11247  size_t maxBlockCount,
11248  VkDeviceSize bufferImageGranularity,
11249  uint32_t frameInUseCount,
11250  bool isCustomPool,
11251  bool explicitBlockSize,
11252  uint32_t algorithm) :
11253  m_hAllocator(hAllocator),
11254  m_MemoryTypeIndex(memoryTypeIndex),
11255  m_PreferredBlockSize(preferredBlockSize),
11256  m_MinBlockCount(minBlockCount),
11257  m_MaxBlockCount(maxBlockCount),
11258  m_BufferImageGranularity(bufferImageGranularity),
11259  m_FrameInUseCount(frameInUseCount),
11260  m_IsCustomPool(isCustomPool),
11261  m_ExplicitBlockSize(explicitBlockSize),
11262  m_Algorithm(algorithm),
11263  m_HasEmptyBlock(false),
11264  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11265  m_NextBlockId(0)
11266 {
11267 }
11268 
11269 VmaBlockVector::~VmaBlockVector()
11270 {
11271  for(size_t i = m_Blocks.size(); i--; )
11272  {
11273  m_Blocks[i]->Destroy(m_hAllocator);
11274  vma_delete(m_hAllocator, m_Blocks[i]);
11275  }
11276 }
11277 
11278 VkResult VmaBlockVector::CreateMinBlocks()
11279 {
11280  for(size_t i = 0; i < m_MinBlockCount; ++i)
11281  {
11282  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11283  if(res != VK_SUCCESS)
11284  {
11285  return res;
11286  }
11287  }
11288  return VK_SUCCESS;
11289 }
11290 
11291 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11292 {
11293  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11294 
11295  const size_t blockCount = m_Blocks.size();
11296 
11297  pStats->size = 0;
11298  pStats->unusedSize = 0;
11299  pStats->allocationCount = 0;
11300  pStats->unusedRangeCount = 0;
11301  pStats->unusedRangeSizeMax = 0;
11302  pStats->blockCount = blockCount;
11303 
11304  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11305  {
11306  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11307  VMA_ASSERT(pBlock);
11308  VMA_HEAVY_ASSERT(pBlock->Validate());
11309  pBlock->m_pMetadata->AddPoolStats(*pStats);
11310  }
11311 }
11312 
11313 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11314 {
11315  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11316  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11317  (VMA_DEBUG_MARGIN > 0) &&
11318  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11319 }
11320 
11321 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11322 
11323 VkResult VmaBlockVector::Allocate(
11324  VmaPool hCurrentPool,
11325  uint32_t currentFrameIndex,
11326  VkDeviceSize size,
11327  VkDeviceSize alignment,
11328  const VmaAllocationCreateInfo& createInfo,
11329  VmaSuballocationType suballocType,
11330  size_t allocationCount,
11331  VmaAllocation* pAllocations)
11332 {
11333  size_t allocIndex;
11334  VkResult res = VK_SUCCESS;
11335 
11336  {
11337  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11338  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11339  {
11340  res = AllocatePage(
11341  hCurrentPool,
11342  currentFrameIndex,
11343  size,
11344  alignment,
11345  createInfo,
11346  suballocType,
11347  pAllocations + allocIndex);
11348  if(res != VK_SUCCESS)
11349  {
11350  break;
11351  }
11352  }
11353  }
11354 
11355  if(res != VK_SUCCESS)
11356  {
11357  // Free all already created allocations.
11358  while(allocIndex--)
11359  {
11360  Free(pAllocations[allocIndex]);
11361  }
11362  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11363  }
11364 
11365  return res;
11366 }
11367 
11368 VkResult VmaBlockVector::AllocatePage(
11369  VmaPool hCurrentPool,
11370  uint32_t currentFrameIndex,
11371  VkDeviceSize size,
11372  VkDeviceSize alignment,
11373  const VmaAllocationCreateInfo& createInfo,
11374  VmaSuballocationType suballocType,
11375  VmaAllocation* pAllocation)
11376 {
11377  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11378  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11379  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11380  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11381  const bool canCreateNewBlock =
11382  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11383  (m_Blocks.size() < m_MaxBlockCount);
11384  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11385 
11386  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11387  // Which in turn is available only when maxBlockCount = 1.
11388  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11389  {
11390  canMakeOtherLost = false;
11391  }
11392 
11393  // Upper address can only be used with linear allocator and within single memory block.
11394  if(isUpperAddress &&
11395  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11396  {
11397  return VK_ERROR_FEATURE_NOT_PRESENT;
11398  }
11399 
11400  // Validate strategy.
11401  switch(strategy)
11402  {
11403  case 0:
11404  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11405  break;
11406  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11407  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11408  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11409  break;
11410  default:
11411  return VK_ERROR_FEATURE_NOT_PRESENT;
11412  }
11413 
11414  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11415  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11416  {
11417  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11418  }
11419 
11420  /*
11421  Under certain conditions, this whole section can be skipped for optimization, so
11422  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11423  e.g. for custom pools with linear algorithm.
11424  */
11425  if(!canMakeOtherLost || canCreateNewBlock)
11426  {
11427  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11428  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11429  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11430 
11431  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11432  {
11433  // Use only last block.
11434  if(!m_Blocks.empty())
11435  {
11436  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11437  VMA_ASSERT(pCurrBlock);
11438  VkResult res = AllocateFromBlock(
11439  pCurrBlock,
11440  hCurrentPool,
11441  currentFrameIndex,
11442  size,
11443  alignment,
11444  allocFlagsCopy,
11445  createInfo.pUserData,
11446  suballocType,
11447  strategy,
11448  pAllocation);
11449  if(res == VK_SUCCESS)
11450  {
11451  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11452  return VK_SUCCESS;
11453  }
11454  }
11455  }
11456  else
11457  {
11458  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11459  {
11460  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11461  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11462  {
11463  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11464  VMA_ASSERT(pCurrBlock);
11465  VkResult res = AllocateFromBlock(
11466  pCurrBlock,
11467  hCurrentPool,
11468  currentFrameIndex,
11469  size,
11470  alignment,
11471  allocFlagsCopy,
11472  createInfo.pUserData,
11473  suballocType,
11474  strategy,
11475  pAllocation);
11476  if(res == VK_SUCCESS)
11477  {
11478  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11479  return VK_SUCCESS;
11480  }
11481  }
11482  }
11483  else // WORST_FIT, FIRST_FIT
11484  {
11485  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11486  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11487  {
11488  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11489  VMA_ASSERT(pCurrBlock);
11490  VkResult res = AllocateFromBlock(
11491  pCurrBlock,
11492  hCurrentPool,
11493  currentFrameIndex,
11494  size,
11495  alignment,
11496  allocFlagsCopy,
11497  createInfo.pUserData,
11498  suballocType,
11499  strategy,
11500  pAllocation);
11501  if(res == VK_SUCCESS)
11502  {
11503  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11504  return VK_SUCCESS;
11505  }
11506  }
11507  }
11508  }
11509 
11510  // 2. Try to create new block.
11511  if(canCreateNewBlock)
11512  {
11513  // Calculate optimal size for new block.
11514  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11515  uint32_t newBlockSizeShift = 0;
11516  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11517 
11518  if(!m_ExplicitBlockSize)
11519  {
11520  // Allocate 1/8, 1/4, 1/2 as first blocks.
11521  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11522  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11523  {
11524  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11525  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11526  {
11527  newBlockSize = smallerNewBlockSize;
11528  ++newBlockSizeShift;
11529  }
11530  else
11531  {
11532  break;
11533  }
11534  }
11535  }
11536 
11537  size_t newBlockIndex = 0;
11538  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11539  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11540  if(!m_ExplicitBlockSize)
11541  {
11542  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11543  {
11544  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11545  if(smallerNewBlockSize >= size)
11546  {
11547  newBlockSize = smallerNewBlockSize;
11548  ++newBlockSizeShift;
11549  res = CreateBlock(newBlockSize, &newBlockIndex);
11550  }
11551  else
11552  {
11553  break;
11554  }
11555  }
11556  }
11557 
11558  if(res == VK_SUCCESS)
11559  {
11560  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11561  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11562 
11563  res = AllocateFromBlock(
11564  pBlock,
11565  hCurrentPool,
11566  currentFrameIndex,
11567  size,
11568  alignment,
11569  allocFlagsCopy,
11570  createInfo.pUserData,
11571  suballocType,
11572  strategy,
11573  pAllocation);
11574  if(res == VK_SUCCESS)
11575  {
11576  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11577  return VK_SUCCESS;
11578  }
11579  else
11580  {
11581  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11582  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11583  }
11584  }
11585  }
11586  }
11587 
11588  // 3. Try to allocate from existing blocks with making other allocations lost.
11589  if(canMakeOtherLost)
11590  {
11591  uint32_t tryIndex = 0;
11592  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11593  {
11594  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11595  VmaAllocationRequest bestRequest = {};
11596  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11597 
11598  // 1. Search existing allocations.
11599  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11600  {
11601  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11602  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11603  {
11604  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11605  VMA_ASSERT(pCurrBlock);
11606  VmaAllocationRequest currRequest = {};
11607  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11608  currentFrameIndex,
11609  m_FrameInUseCount,
11610  m_BufferImageGranularity,
11611  size,
11612  alignment,
11613  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11614  suballocType,
11615  canMakeOtherLost,
11616  strategy,
11617  &currRequest))
11618  {
11619  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11620  if(pBestRequestBlock == VMA_NULL ||
11621  currRequestCost < bestRequestCost)
11622  {
11623  pBestRequestBlock = pCurrBlock;
11624  bestRequest = currRequest;
11625  bestRequestCost = currRequestCost;
11626 
11627  if(bestRequestCost == 0)
11628  {
11629  break;
11630  }
11631  }
11632  }
11633  }
11634  }
11635  else // WORST_FIT, FIRST_FIT
11636  {
11637  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11638  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11639  {
11640  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11641  VMA_ASSERT(pCurrBlock);
11642  VmaAllocationRequest currRequest = {};
11643  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11644  currentFrameIndex,
11645  m_FrameInUseCount,
11646  m_BufferImageGranularity,
11647  size,
11648  alignment,
11649  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11650  suballocType,
11651  canMakeOtherLost,
11652  strategy,
11653  &currRequest))
11654  {
11655  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11656  if(pBestRequestBlock == VMA_NULL ||
11657  currRequestCost < bestRequestCost ||
11658  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11659  {
11660  pBestRequestBlock = pCurrBlock;
11661  bestRequest = currRequest;
11662  bestRequestCost = currRequestCost;
11663 
11664  if(bestRequestCost == 0 ||
11665  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11666  {
11667  break;
11668  }
11669  }
11670  }
11671  }
11672  }
11673 
11674  if(pBestRequestBlock != VMA_NULL)
11675  {
11676  if(mapped)
11677  {
11678  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11679  if(res != VK_SUCCESS)
11680  {
11681  return res;
11682  }
11683  }
11684 
11685  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11686  currentFrameIndex,
11687  m_FrameInUseCount,
11688  &bestRequest))
11689  {
11690  // We no longer have an empty Allocation.
11691  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11692  {
11693  m_HasEmptyBlock = false;
11694  }
11695  // Allocate from this pBlock.
11696  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11697  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11698  (*pAllocation)->InitBlockAllocation(
11699  hCurrentPool,
11700  pBestRequestBlock,
11701  bestRequest.offset,
11702  alignment,
11703  size,
11704  suballocType,
11705  mapped,
11706  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11707  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11708  VMA_DEBUG_LOG("  Returned from existing block");
11709  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11710  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11711  {
11712  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11713  }
11714  if(IsCorruptionDetectionEnabled())
11715  {
11716  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11717  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11718  }
11719  return VK_SUCCESS;
11720  }
11721  // else: Some allocations must have been touched while we are here. Next try.
11722  }
11723  else
11724  {
11725  // Could not find place in any of the blocks - break outer loop.
11726  break;
11727  }
11728  }
11729  /* Maximum number of tries exceeded - a very unlikely event when many other
11730  threads are simultaneously touching allocations, making it impossible to make
11731  them lost at the same time as we try to allocate. */
11732  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11733  {
11734  return VK_ERROR_TOO_MANY_OBJECTS;
11735  }
11736  }
11737 
11738  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11739 }
11740 
11741 void VmaBlockVector::Free(
11742  VmaAllocation hAllocation)
11743 {
11744  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11745 
11746  // Scope for lock.
11747  {
11748  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11749 
11750  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11751 
11752  if(IsCorruptionDetectionEnabled())
11753  {
11754  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11755  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11756  }
11757 
11758  if(hAllocation->IsPersistentMap())
11759  {
11760  pBlock->Unmap(m_hAllocator, 1);
11761  }
11762 
11763  pBlock->m_pMetadata->Free(hAllocation);
11764  VMA_HEAVY_ASSERT(pBlock->Validate());
11765 
11766  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11767 
11768  // pBlock became empty after this deallocation.
11769  if(pBlock->m_pMetadata->IsEmpty())
11770  {
11771  // Already has empty Allocation. We don't want to have two, so delete this one.
11772  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11773  {
11774  pBlockToDelete = pBlock;
11775  Remove(pBlock);
11776  }
11777  // We now have first empty block.
11778  else
11779  {
11780  m_HasEmptyBlock = true;
11781  }
11782  }
11783  // pBlock didn't become empty, but we have another empty block - find and free that one.
11784  // (This is optional, heuristics.)
11785  else if(m_HasEmptyBlock)
11786  {
11787  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11788  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11789  {
11790  pBlockToDelete = pLastBlock;
11791  m_Blocks.pop_back();
11792  m_HasEmptyBlock = false;
11793  }
11794  }
11795 
11796  IncrementallySortBlocks();
11797  }
11798 
11799  // Destruction of a free block. Deferred until this point, outside of mutex
11800  // lock, for performance reasons.
11801  if(pBlockToDelete != VMA_NULL)
11802  {
11803  VMA_DEBUG_LOG(" Deleted empty allocation");
11804  pBlockToDelete->Destroy(m_hAllocator);
11805  vma_delete(m_hAllocator, pBlockToDelete);
11806  }
11807 }
11808 
11809 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11810 {
11811  VkDeviceSize result = 0;
11812  for(size_t i = m_Blocks.size(); i--; )
11813  {
11814  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11815  if(result >= m_PreferredBlockSize)
11816  {
11817  break;
11818  }
11819  }
11820  return result;
11821 }
11822 
11823 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11824 {
11825  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11826  {
11827  if(m_Blocks[blockIndex] == pBlock)
11828  {
11829  VmaVectorRemove(m_Blocks, blockIndex);
11830  return;
11831  }
11832  }
11833  VMA_ASSERT(0);
11834 }
11835 
11836 void VmaBlockVector::IncrementallySortBlocks()
11837 {
11838  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11839  {
11840  // Bubble sort only until first swap.
11841  for(size_t i = 1; i < m_Blocks.size(); ++i)
11842  {
11843  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11844  {
11845  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11846  return;
11847  }
11848  }
11849  }
11850 }
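/*
The loop above runs one pass of bubble sort and stops at the first swap, amortizing
the cost of keeping m_Blocks sorted by ascending sum of free space across many calls.
A minimal standalone sketch of the same idea (hypothetical helper, not part of this
library):

    void IncrementallySort(std::vector<int>& v)
    {
        for(size_t i = 1; i < v.size(); ++i)
        {
            if(v[i - 1] > v[i])
            {
                std::swap(v[i - 1], v[i]); // At most one swap per call.
                return;
            }
        }
    }
*/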
11851 
11852 VkResult VmaBlockVector::AllocateFromBlock(
11853  VmaDeviceMemoryBlock* pBlock,
11854  VmaPool hCurrentPool,
11855  uint32_t currentFrameIndex,
11856  VkDeviceSize size,
11857  VkDeviceSize alignment,
11858  VmaAllocationCreateFlags allocFlags,
11859  void* pUserData,
11860  VmaSuballocationType suballocType,
11861  uint32_t strategy,
11862  VmaAllocation* pAllocation)
11863 {
11864  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11865  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11866  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11867  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11868 
11869  VmaAllocationRequest currRequest = {};
11870  if(pBlock->m_pMetadata->CreateAllocationRequest(
11871  currentFrameIndex,
11872  m_FrameInUseCount,
11873  m_BufferImageGranularity,
11874  size,
11875  alignment,
11876  isUpperAddress,
11877  suballocType,
11878  false, // canMakeOtherLost
11879  strategy,
11880  &currRequest))
11881  {
11882  // Allocate from pCurrBlock.
11883  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11884 
11885  if(mapped)
11886  {
11887  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11888  if(res != VK_SUCCESS)
11889  {
11890  return res;
11891  }
11892  }
11893 
11894  // We no longer have an empty Allocation.
11895  if(pBlock->m_pMetadata->IsEmpty())
11896  {
11897  m_HasEmptyBlock = false;
11898  }
11899 
11900  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11901  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11902  (*pAllocation)->InitBlockAllocation(
11903  hCurrentPool,
11904  pBlock,
11905  currRequest.offset,
11906  alignment,
11907  size,
11908  suballocType,
11909  mapped,
11910  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11911  VMA_HEAVY_ASSERT(pBlock->Validate());
11912  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11913  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11914  {
11915  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11916  }
11917  if(IsCorruptionDetectionEnabled())
11918  {
11919  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11920  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11921  }
11922  return VK_SUCCESS;
11923  }
11924  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11925 }
11926 
11927 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11928 {
11929  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11930  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11931  allocInfo.allocationSize = blockSize;
11932  VkDeviceMemory mem = VK_NULL_HANDLE;
11933  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11934  if(res < 0)
11935  {
11936  return res;
11937  }
11938 
11939  // New VkDeviceMemory successfully created.
11940 
11941  // Create new Allocation for it.
11942  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11943  pBlock->Init(
11944  m_hAllocator,
11945  m_MemoryTypeIndex,
11946  mem,
11947  allocInfo.allocationSize,
11948  m_NextBlockId++,
11949  m_Algorithm);
11950 
11951  m_Blocks.push_back(pBlock);
11952  if(pNewBlockIndex != VMA_NULL)
11953  {
11954  *pNewBlockIndex = m_Blocks.size() - 1;
11955  }
11956 
11957  return VK_SUCCESS;
11958 }
11959 
11960 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11961  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11962  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11963 {
11964  const size_t blockCount = m_Blocks.size();
11965  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11966 
11967  enum BLOCK_FLAG
11968  {
11969  BLOCK_FLAG_USED = 0x00000001,
11970  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11971  };
11972 
11973  struct BlockInfo
11974  {
11975  uint32_t flags;
11976  void* pMappedData;
11977  };
11978  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11979  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11980  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11981 
11982  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11983  const size_t moveCount = moves.size();
11984  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11985  {
11986  const VmaDefragmentationMove& move = moves[moveIndex];
11987  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11988  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11989  }
11990 
11991  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11992 
11993  // Go over all blocks. Get mapped pointer or map if necessary.
11994  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11995  {
11996  BlockInfo& currBlockInfo = blockInfo[blockIndex];
11997  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11998  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
11999  {
12000  currBlockInfo.pMappedData = pBlock->GetMappedData();
12001  // It is not originally mapped - map it.
12002  if(currBlockInfo.pMappedData == VMA_NULL)
12003  {
12004  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12005  if(pDefragCtx->res == VK_SUCCESS)
12006  {
12007  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12008  }
12009  }
12010  }
12011  }
12012 
12013  // Go over all moves. Do actual data transfer.
12014  if(pDefragCtx->res == VK_SUCCESS)
12015  {
12016  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12017  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12018 
12019  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12020  {
12021  const VmaDefragmentationMove& move = moves[moveIndex];
12022 
12023  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12024  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12025 
12026  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12027 
12028  // Invalidate source.
12029  if(isNonCoherent)
12030  {
12031  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12032  memRange.memory = pSrcBlock->GetDeviceMemory();
12033  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12034  memRange.size = VMA_MIN(
12035  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12036  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12037  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12038  }
12039 
12040  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12041  memmove(
12042  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12043  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12044  static_cast<size_t>(move.size));
12045 
12046  if(IsCorruptionDetectionEnabled())
12047  {
12048  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12049  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12050  }
12051 
12052  // Flush destination.
12053  if(isNonCoherent)
12054  {
12055  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12056  memRange.memory = pDstBlock->GetDeviceMemory();
12057  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12058  memRange.size = VMA_MIN(
12059  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12060  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12061  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12062  }
12063  }
12064  }
12065 
12066  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12067  // Regardless of pCtx->res == VK_SUCCESS.
12068  for(size_t blockIndex = blockCount; blockIndex--; )
12069  {
12070  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12071  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12072  {
12073  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12074  pBlock->Unmap(m_hAllocator, 1);
12075  }
12076  }
12077 }
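/*
For non-coherent memory, ranges passed to vkInvalidateMappedMemoryRanges /
vkFlushMappedMemoryRanges must be aligned to VkPhysicalDeviceLimits::nonCoherentAtomSize,
which is what the VmaAlignDown/VmaAlignUp arithmetic above computes. A minimal sketch of
that alignment, assuming offset, size, atomSize and blockSize are plain VkDeviceSize
values (illustration only):

    // Expand [offset, offset + size) to atom boundaries, clamped to the block end.
    VkDeviceSize alignedOffset = offset / atomSize * atomSize;                      // align down
    VkDeviceSize alignedEnd = (offset + size + atomSize - 1) / atomSize * atomSize; // align up
    VkDeviceSize alignedSize = VMA_MIN(alignedEnd, blockSize) - alignedOffset;
*/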
12078 
12079 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12080  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12081  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12082  VkCommandBuffer commandBuffer)
12083 {
12084  const size_t blockCount = m_Blocks.size();
12085 
12086  pDefragCtx->blockContexts.resize(blockCount);
12087  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12088 
12089  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12090  const size_t moveCount = moves.size();
12091  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12092  {
12093  const VmaDefragmentationMove& move = moves[moveIndex];
12094  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12095  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12096  }
12097 
12098  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12099 
12100  // Go over all blocks. Create and bind buffer for whole block if necessary.
12101  {
12102  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12103  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12104  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12105 
12106  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12107  {
12108  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12109  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12110  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12111  {
12112  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12113  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12114  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12115  if(pDefragCtx->res == VK_SUCCESS)
12116  {
12117  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12118  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12119  }
12120  }
12121  }
12122  }
12123 
12124  // Go over all moves. Post data transfer commands to command buffer.
12125  if(pDefragCtx->res == VK_SUCCESS)
12126  {
12127  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12128  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12129 
12130  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12131  {
12132  const VmaDefragmentationMove& move = moves[moveIndex];
12133 
12134  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12135  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12136 
12137  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12138 
12139  VkBufferCopy region = {
12140  move.srcOffset,
12141  move.dstOffset,
12142  move.size };
12143  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12144  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12145  }
12146  }
12147 
12148  // Save buffers to defrag context for later destruction.
12149  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12150  {
12151  pDefragCtx->res = VK_NOT_READY;
12152  }
12153 }
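/*
Note on the VK_NOT_READY result above: it signals that copy commands have been recorded
into the user-provided command buffer but not yet executed. The temporary whole-block
buffers created here must therefore outlive command buffer execution; they are destroyed
later, in DefragmentationEnd().
*/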
12154 
12155 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12156 {
12157  m_HasEmptyBlock = false;
12158  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12159  {
12160  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12161  if(pBlock->m_pMetadata->IsEmpty())
12162  {
12163  if(m_Blocks.size() > m_MinBlockCount)
12164  {
12165  if(pDefragmentationStats != VMA_NULL)
12166  {
12167  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12168  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12169  }
12170 
12171  VmaVectorRemove(m_Blocks, blockIndex);
12172  pBlock->Destroy(m_hAllocator);
12173  vma_delete(m_hAllocator, pBlock);
12174  }
12175  else
12176  {
12177  m_HasEmptyBlock = true;
12178  }
12179  }
12180  }
12181 }
12182 
12183 #if VMA_STATS_STRING_ENABLED
12184 
12185 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12186 {
12187  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12188 
12189  json.BeginObject();
12190 
12191  if(m_IsCustomPool)
12192  {
12193  json.WriteString("MemoryTypeIndex");
12194  json.WriteNumber(m_MemoryTypeIndex);
12195 
12196  json.WriteString("BlockSize");
12197  json.WriteNumber(m_PreferredBlockSize);
12198 
12199  json.WriteString("BlockCount");
12200  json.BeginObject(true);
12201  if(m_MinBlockCount > 0)
12202  {
12203  json.WriteString("Min");
12204  json.WriteNumber((uint64_t)m_MinBlockCount);
12205  }
12206  if(m_MaxBlockCount < SIZE_MAX)
12207  {
12208  json.WriteString("Max");
12209  json.WriteNumber((uint64_t)m_MaxBlockCount);
12210  }
12211  json.WriteString("Cur");
12212  json.WriteNumber((uint64_t)m_Blocks.size());
12213  json.EndObject();
12214 
12215  if(m_FrameInUseCount > 0)
12216  {
12217  json.WriteString("FrameInUseCount");
12218  json.WriteNumber(m_FrameInUseCount);
12219  }
12220 
12221  if(m_Algorithm != 0)
12222  {
12223  json.WriteString("Algorithm");
12224  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12225  }
12226  }
12227  else
12228  {
12229  json.WriteString("PreferredBlockSize");
12230  json.WriteNumber(m_PreferredBlockSize);
12231  }
12232 
12233  json.WriteString("Blocks");
12234  json.BeginObject();
12235  for(size_t i = 0; i < m_Blocks.size(); ++i)
12236  {
12237  json.BeginString();
12238  json.ContinueString(m_Blocks[i]->GetId());
12239  json.EndString();
12240 
12241  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12242  }
12243  json.EndObject();
12244 
12245  json.EndObject();
12246 }
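/*
For a custom pool, the writer above produces JSON shaped roughly like this (values are
illustrative only; optional keys appear when the corresponding limits are set):

    {
        "MemoryTypeIndex": 2,
        "BlockSize": 268435456,
        "BlockCount": { "Min": 1, "Cur": 3 },
        "FrameInUseCount": 1,
        "Blocks": {
            "0": { ...detailed map of block 0... },
            "5": { ...detailed map of block 5... }
        }
    }
*/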
12247 
12248 #endif // #if VMA_STATS_STRING_ENABLED
12249 
12250 void VmaBlockVector::Defragment(
12251  class VmaBlockVectorDefragmentationContext* pCtx,
12252  VmaDefragmentationStats* pStats,
12253  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12254  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12255  VkCommandBuffer commandBuffer)
12256 {
12257  pCtx->res = VK_SUCCESS;
12258 
12259  const VkMemoryPropertyFlags memPropFlags =
12260  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12261  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12262  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12263 
12264  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12265  isHostVisible;
12266  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12267  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
12268 
12269  // There are options to defragment this memory type.
12270  if(canDefragmentOnCpu || canDefragmentOnGpu)
12271  {
12272  bool defragmentOnGpu;
12273  // There is only one option to defragment this memory type.
12274  if(canDefragmentOnGpu != canDefragmentOnCpu)
12275  {
12276  defragmentOnGpu = canDefragmentOnGpu;
12277  }
12278  // Both options are available: Heuristics to choose the best one.
12279  else
12280  {
12281  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12282  m_hAllocator->IsIntegratedGpu();
12283  }
12284 
12285  bool overlappingMoveSupported = !defragmentOnGpu;
12286 
12287  if(m_hAllocator->m_UseMutex)
12288  {
12289  m_Mutex.LockWrite();
12290  pCtx->mutexLocked = true;
12291  }
12292 
12293  pCtx->Begin(overlappingMoveSupported);
12294 
12295  // Defragment.
12296 
12297  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12298  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12299  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12300  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12301  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12302 
12303  // Accumulate statistics.
12304  if(pStats != VMA_NULL)
12305  {
12306  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12307  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12308  pStats->bytesMoved += bytesMoved;
12309  pStats->allocationsMoved += allocationsMoved;
12310  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12311  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12312  if(defragmentOnGpu)
12313  {
12314  maxGpuBytesToMove -= bytesMoved;
12315  maxGpuAllocationsToMove -= allocationsMoved;
12316  }
12317  else
12318  {
12319  maxCpuBytesToMove -= bytesMoved;
12320  maxCpuAllocationsToMove -= allocationsMoved;
12321  }
12322  }
12323 
12324  if(pCtx->res >= VK_SUCCESS)
12325  {
12326  if(defragmentOnGpu)
12327  {
12328  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12329  }
12330  else
12331  {
12332  ApplyDefragmentationMovesCpu(pCtx, moves);
12333  }
12334  }
12335  }
12336 }
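/*
Summary of the heuristic above: the GPU path (vkCmdCopyBuffer) is preferred for
DEVICE_LOCAL memory and on integrated GPUs, while the CPU path (memmove on mapped
pointers) requires HOST_VISIBLE memory. Only the CPU path can handle overlapping
source/destination regions, hence overlappingMoveSupported = !defragmentOnGpu.
*/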
12337 
12338 void VmaBlockVector::DefragmentationEnd(
12339  class VmaBlockVectorDefragmentationContext* pCtx,
12340  VmaDefragmentationStats* pStats)
12341 {
12342  // Destroy buffers.
12343  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12344  {
12345  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12346  if(blockCtx.hBuffer)
12347  {
12348  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12349  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12350  }
12351  }
12352 
12353  if(pCtx->res >= VK_SUCCESS)
12354  {
12355  FreeEmptyBlocks(pStats);
12356  }
12357 
12358  if(pCtx->mutexLocked)
12359  {
12360  VMA_ASSERT(m_hAllocator->m_UseMutex);
12361  m_Mutex.UnlockWrite();
12362  }
12363 }
12364 
12365 size_t VmaBlockVector::CalcAllocationCount() const
12366 {
12367  size_t result = 0;
12368  for(size_t i = 0; i < m_Blocks.size(); ++i)
12369  {
12370  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12371  }
12372  return result;
12373 }
12374 
12375 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12376 {
12377  if(m_BufferImageGranularity == 1)
12378  {
12379  return false;
12380  }
12381  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12382  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12383  {
12384  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12385  VMA_ASSERT(m_Algorithm == 0);
12386  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12387  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12388  {
12389  return true;
12390  }
12391  }
12392  return false;
12393 }
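/*
Worked example of the conflict checked above: with bufferImageGranularity = 4096, a
buffer ending at offset 100 and an optimal-tiling image starting at offset 2048 would
share the same 4096-byte "page", which the Vulkan specification forbids for linear vs.
non-linear resources placed in the same VkDeviceMemory.
*/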
12394 
12395 void VmaBlockVector::MakePoolAllocationsLost(
12396  uint32_t currentFrameIndex,
12397  size_t* pLostAllocationCount)
12398 {
12399  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12400  size_t lostAllocationCount = 0;
12401  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12402  {
12403  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12404  VMA_ASSERT(pBlock);
12405  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12406  }
12407  if(pLostAllocationCount != VMA_NULL)
12408  {
12409  *pLostAllocationCount = lostAllocationCount;
12410  }
12411 }
12412 
12413 VkResult VmaBlockVector::CheckCorruption()
12414 {
12415  if(!IsCorruptionDetectionEnabled())
12416  {
12417  return VK_ERROR_FEATURE_NOT_PRESENT;
12418  }
12419 
12420  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12421  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12422  {
12423  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12424  VMA_ASSERT(pBlock);
12425  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12426  if(res != VK_SUCCESS)
12427  {
12428  return res;
12429  }
12430  }
12431  return VK_SUCCESS;
12432 }
12433 
12434 void VmaBlockVector::AddStats(VmaStats* pStats)
12435 {
12436  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12437  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12438 
12439  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12440 
12441  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12442  {
12443  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12444  VMA_ASSERT(pBlock);
12445  VMA_HEAVY_ASSERT(pBlock->Validate());
12446  VmaStatInfo allocationStatInfo;
12447  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12448  VmaAddStatInfo(pStats->total, allocationStatInfo);
12449  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12450  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12451  }
12452 }
12453 
12454 ////////////////////////////////////////////////////////////////////////////////
12455 // VmaDefragmentationAlgorithm_Generic members definition
12456 
12457 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12458  VmaAllocator hAllocator,
12459  VmaBlockVector* pBlockVector,
12460  uint32_t currentFrameIndex,
12461  bool overlappingMoveSupported) :
12462  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12463  m_AllAllocations(false),
12464  m_AllocationCount(0),
12465  m_BytesMoved(0),
12466  m_AllocationsMoved(0),
12467  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12468 {
12469  // Create block info for each block.
12470  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12471  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12472  {
12473  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12474  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12475  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12476  m_Blocks.push_back(pBlockInfo);
12477  }
12478 
12479  // Sort them by m_pBlock pointer value.
12480  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12481 }
12482 
12483 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12484 {
12485  for(size_t i = m_Blocks.size(); i--; )
12486  {
12487  vma_delete(m_hAllocator, m_Blocks[i]);
12488  }
12489 }
12490 
12491 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12492 {
12493  // Now, as we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
12494  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12495  {
12496  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12497  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12498  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12499  {
12500  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12501  (*it)->m_Allocations.push_back(allocInfo);
12502  }
12503  else
12504  {
12505  VMA_ASSERT(0);
12506  }
12507 
12508  ++m_AllocationCount;
12509  }
12510 }
12511 
12512 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12513  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12514  VkDeviceSize maxBytesToMove,
12515  uint32_t maxAllocationsToMove)
12516 {
12517  if(m_Blocks.empty())
12518  {
12519  return VK_SUCCESS;
12520  }
12521 
12522  // This is a choice based on research.
12523  // Option 1:
12524  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12525  // Option 2:
12526  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12527  // Option 3:
12528  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12529 
12530  size_t srcBlockMinIndex = 0;
12531  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12532  /*
12533  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12534  {
12535  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12536  if(blocksWithNonMovableCount > 0)
12537  {
12538  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12539  }
12540  }
12541  */
12542 
12543  size_t srcBlockIndex = m_Blocks.size() - 1;
12544  size_t srcAllocIndex = SIZE_MAX;
12545  for(;;)
12546  {
12547  // 1. Find next allocation to move.
12548  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12549  // 1.2. Then start from last to first m_Allocations.
12550  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12551  {
12552  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12553  {
12554  // Finished: no more allocations to process.
12555  if(srcBlockIndex == srcBlockMinIndex)
12556  {
12557  return VK_SUCCESS;
12558  }
12559  else
12560  {
12561  --srcBlockIndex;
12562  srcAllocIndex = SIZE_MAX;
12563  }
12564  }
12565  else
12566  {
12567  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12568  }
12569  }
12570 
12571  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12572  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12573 
12574  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12575  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12576  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12577  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12578 
12579  // 2. Try to find new place for this allocation in preceding or current block.
12580  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12581  {
12582  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12583  VmaAllocationRequest dstAllocRequest;
12584  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12585  m_CurrentFrameIndex,
12586  m_pBlockVector->GetFrameInUseCount(),
12587  m_pBlockVector->GetBufferImageGranularity(),
12588  size,
12589  alignment,
12590  false, // upperAddress
12591  suballocType,
12592  false, // canMakeOtherLost
12593  strategy,
12594  &dstAllocRequest) &&
12595  MoveMakesSense(
12596  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12597  {
12598  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12599 
12600  // Reached limit on number of allocations or bytes to move.
12601  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12602  (m_BytesMoved + size > maxBytesToMove))
12603  {
12604  return VK_SUCCESS;
12605  }
12606 
12607  VmaDefragmentationMove move;
12608  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12609  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12610  move.srcOffset = srcOffset;
12611  move.dstOffset = dstAllocRequest.offset;
12612  move.size = size;
12613  moves.push_back(move);
12614 
12615  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12616  dstAllocRequest,
12617  suballocType,
12618  size,
12619  false, // upperAddress
12620  allocInfo.m_hAllocation);
12621  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12622 
12623  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12624 
12625  if(allocInfo.m_pChanged != VMA_NULL)
12626  {
12627  *allocInfo.m_pChanged = VK_TRUE;
12628  }
12629 
12630  ++m_AllocationsMoved;
12631  m_BytesMoved += size;
12632 
12633  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12634 
12635  break;
12636  }
12637  }
12638 
12639  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12640 
12641  if(srcAllocIndex > 0)
12642  {
12643  --srcAllocIndex;
12644  }
12645  else
12646  {
12647  if(srcBlockIndex > 0)
12648  {
12649  --srcBlockIndex;
12650  srcAllocIndex = SIZE_MAX;
12651  }
12652  else
12653  {
12654  return VK_SUCCESS;
12655  }
12656  }
12657  }
12658 }
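/*
Each round above walks allocations starting from the most "source" blocks (the back of
m_Blocks) and greedily re-places them into the most "destination" blocks (the front),
returning VK_SUCCESS as soon as maxBytesToMove or maxAllocationsToMove would be
exceeded, or when every allocation has been visited.
*/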
12659 
12660 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12661 {
12662  size_t result = 0;
12663  for(size_t i = 0; i < m_Blocks.size(); ++i)
12664  {
12665  if(m_Blocks[i]->m_HasNonMovableAllocations)
12666  {
12667  ++result;
12668  }
12669  }
12670  return result;
12671 }
12672 
12673 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12674  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12675  VkDeviceSize maxBytesToMove,
12676  uint32_t maxAllocationsToMove)
12677 {
12678  if(!m_AllAllocations && m_AllocationCount == 0)
12679  {
12680  return VK_SUCCESS;
12681  }
12682 
12683  const size_t blockCount = m_Blocks.size();
12684  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12685  {
12686  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12687 
12688  if(m_AllAllocations)
12689  {
12690  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12691  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12692  it != pMetadata->m_Suballocations.end();
12693  ++it)
12694  {
12695  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12696  {
12697  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12698  pBlockInfo->m_Allocations.push_back(allocInfo);
12699  }
12700  }
12701  }
12702 
12703  pBlockInfo->CalcHasNonMovableAllocations();
12704 
12705  // This is a choice based on research.
12706  // Option 1:
12707  pBlockInfo->SortAllocationsByOffsetDescending();
12708  // Option 2:
12709  //pBlockInfo->SortAllocationsBySizeDescending();
12710  }
12711 
12712  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12713  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12714 
12715  // This is a choice based on research.
12716  const uint32_t roundCount = 2;
12717 
12718  // Execute defragmentation rounds (the main part).
12719  VkResult result = VK_SUCCESS;
12720  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12721  {
12722  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12723  }
12724 
12725  return result;
12726 }
12727 
12728 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12729  size_t dstBlockIndex, VkDeviceSize dstOffset,
12730  size_t srcBlockIndex, VkDeviceSize srcOffset)
12731 {
12732  if(dstBlockIndex < srcBlockIndex)
12733  {
12734  return true;
12735  }
12736  if(dstBlockIndex > srcBlockIndex)
12737  {
12738  return false;
12739  }
12740  if(dstOffset < srcOffset)
12741  {
12742  return true;
12743  }
12744  return false;
12745 }
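/*
The comparison above is lexicographic on (blockIndex, offset): a move "makes sense" only
if it transports the allocation strictly toward the front of the sorted block list, or
toward offset 0 within the same block. For example, (block 2, offset 4096) ->
(block 1, offset 65536) is accepted, while the reverse is rejected; this monotonicity is
what guarantees that defragmentation rounds make progress and terminate.
*/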
12746 
12747 ////////////////////////////////////////////////////////////////////////////////
12748 // VmaDefragmentationAlgorithm_Fast
12749 
12750 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12751  VmaAllocator hAllocator,
12752  VmaBlockVector* pBlockVector,
12753  uint32_t currentFrameIndex,
12754  bool overlappingMoveSupported) :
12755  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12756  m_OverlappingMoveSupported(overlappingMoveSupported),
12757  m_AllocationCount(0),
12758  m_AllAllocations(false),
12759  m_BytesMoved(0),
12760  m_AllocationsMoved(0),
12761  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12762 {
12763  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12764 
12765 }
12766 
12767 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12768 {
12769 }
12770 
12771 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12772  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12773  VkDeviceSize maxBytesToMove,
12774  uint32_t maxAllocationsToMove)
12775 {
12776  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12777 
12778  const size_t blockCount = m_pBlockVector->GetBlockCount();
12779  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12780  {
12781  return VK_SUCCESS;
12782  }
12783 
12784  PreprocessMetadata();
12785 
12786  // Sort blocks in order from most "destination" to most "source".
12787 
12788  m_BlockInfos.resize(blockCount);
12789  for(size_t i = 0; i < blockCount; ++i)
12790  {
12791  m_BlockInfos[i].origBlockIndex = i;
12792  }
12793 
12794  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12795  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12796  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12797  });
12798 
12799  // THE MAIN ALGORITHM
12800 
12801  FreeSpaceDatabase freeSpaceDb;
12802 
12803  size_t dstBlockInfoIndex = 0;
12804  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12805  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12806  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12807  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12808  VkDeviceSize dstOffset = 0;
12809 
12810  bool end = false;
12811  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12812  {
12813  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12814  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12815  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12816  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12817  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12818  {
12819  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12820  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12821  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12822  if(m_AllocationsMoved == maxAllocationsToMove ||
12823  m_BytesMoved + srcAllocSize > maxBytesToMove)
12824  {
12825  end = true;
12826  break;
12827  }
12828  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12829 
12830  // Try to place it in one of the free spaces from the database.
12831  size_t freeSpaceInfoIndex;
12832  VkDeviceSize dstAllocOffset;
12833  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12834  freeSpaceInfoIndex, dstAllocOffset))
12835  {
12836  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12837  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12838  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12839  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12840 
12841  // Same block
12842  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12843  {
12844  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12845 
12846  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12847 
12848  VmaSuballocation suballoc = *srcSuballocIt;
12849  suballoc.offset = dstAllocOffset;
12850  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12851  m_BytesMoved += srcAllocSize;
12852  ++m_AllocationsMoved;
12853 
12854  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12855  ++nextSuballocIt;
12856  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12857  srcSuballocIt = nextSuballocIt;
12858 
12859  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12860 
12861  VmaDefragmentationMove move = {
12862  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12863  srcAllocOffset, dstAllocOffset,
12864  srcAllocSize };
12865  moves.push_back(move);
12866  }
12867  // Different block
12868  else
12869  {
12870  // MOVE OPTION 2: Move the allocation to a different block.
12871 
12872  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12873 
12874  VmaSuballocation suballoc = *srcSuballocIt;
12875  suballoc.offset = dstAllocOffset;
12876  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12877  m_BytesMoved += srcAllocSize;
12878  ++m_AllocationsMoved;
12879 
12880  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12881  ++nextSuballocIt;
12882  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12883  srcSuballocIt = nextSuballocIt;
12884 
12885  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12886 
12887  VmaDefragmentationMove move = {
12888  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12889  srcAllocOffset, dstAllocOffset,
12890  srcAllocSize };
12891  moves.push_back(move);
12892  }
12893  }
12894  else
12895  {
12896  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12897 
12898  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
12899  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12900  dstAllocOffset + srcAllocSize > dstBlockSize)
12901  {
12902  // But before that, register remaining free space at the end of dst block.
12903  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12904 
12905  ++dstBlockInfoIndex;
12906  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12907  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12908  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12909  dstBlockSize = pDstMetadata->GetSize();
12910  dstOffset = 0;
12911  dstAllocOffset = 0;
12912  }
12913 
12914  // Same block
12915  if(dstBlockInfoIndex == srcBlockInfoIndex)
12916  {
12917  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12918 
12919  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12920 
12921  bool skipOver = overlap;
12922  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12923  {
12924  // If destination and source place overlap, skip if it would move it
12925  // by only < 1/64 of its size.
12926  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12927  }
12928 
12929  if(skipOver)
12930  {
12931  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12932 
12933  dstOffset = srcAllocOffset + srcAllocSize;
12934  ++srcSuballocIt;
12935  }
12936  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12937  else
12938  {
12939  srcSuballocIt->offset = dstAllocOffset;
12940  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12941  dstOffset = dstAllocOffset + srcAllocSize;
12942  m_BytesMoved += srcAllocSize;
12943  ++m_AllocationsMoved;
12944  ++srcSuballocIt;
12945  VmaDefragmentationMove move = {
12946  srcOrigBlockIndex, dstOrigBlockIndex,
12947  srcAllocOffset, dstAllocOffset,
12948  srcAllocSize };
12949  moves.push_back(move);
12950  }
12951  }
12952  // Different block
12953  else
12954  {
12955  // MOVE OPTION 2: Move the allocation to a different block.
12956 
12957  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12958  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12959 
12960  VmaSuballocation suballoc = *srcSuballocIt;
12961  suballoc.offset = dstAllocOffset;
12962  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12963  dstOffset = dstAllocOffset + srcAllocSize;
12964  m_BytesMoved += srcAllocSize;
12965  ++m_AllocationsMoved;
12966 
12967  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12968  ++nextSuballocIt;
12969  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12970  srcSuballocIt = nextSuballocIt;
12971 
12972  pDstMetadata->m_Suballocations.push_back(suballoc);
12973 
12974  VmaDefragmentationMove move = {
12975  srcOrigBlockIndex, dstOrigBlockIndex,
12976  srcAllocOffset, dstAllocOffset,
12977  srcAllocSize };
12978  moves.push_back(move);
12979  }
12980  }
12981  }
12982  }
12983 
12984  m_BlockInfos.clear();
12985 
12986  PostprocessMetadata();
12987 
12988  return VK_SUCCESS;
12989 }
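/*
In short, the fast algorithm above compacts allocations in a single sweep: blocks are
sorted from least to most free space, a destination cursor (dstOffset) advances through
the front blocks, and each source suballocation is either shifted left inside its own
block, moved into an earlier block, or skipped when the shift would be smaller than
1/64 of its size - in which case the resulting hole is recorded in FreeSpaceDatabase
for later reuse.
*/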
12990 
12991 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12992 {
12993  const size_t blockCount = m_pBlockVector->GetBlockCount();
12994  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12995  {
12996  VmaBlockMetadata_Generic* const pMetadata =
12997  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12998  pMetadata->m_FreeCount = 0;
12999  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13000  pMetadata->m_FreeSuballocationsBySize.clear();
13001  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13002  it != pMetadata->m_Suballocations.end(); )
13003  {
13004  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13005  {
13006  VmaSuballocationList::iterator nextIt = it;
13007  ++nextIt;
13008  pMetadata->m_Suballocations.erase(it);
13009  it = nextIt;
13010  }
13011  else
13012  {
13013  ++it;
13014  }
13015  }
13016  }
13017 }
13018 
13019 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13020 {
13021  const size_t blockCount = m_pBlockVector->GetBlockCount();
13022  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13023  {
13024  VmaBlockMetadata_Generic* const pMetadata =
13025  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13026  const VkDeviceSize blockSize = pMetadata->GetSize();
13027 
13028  // No allocations in this block - entire area is free.
13029  if(pMetadata->m_Suballocations.empty())
13030  {
13031  pMetadata->m_FreeCount = 1;
13032  //pMetadata->m_SumFreeSize is already set to blockSize.
13033  VmaSuballocation suballoc = {
13034  0, // offset
13035  blockSize, // size
13036  VMA_NULL, // hAllocation
13037  VMA_SUBALLOCATION_TYPE_FREE };
13038  pMetadata->m_Suballocations.push_back(suballoc);
13039  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13040  }
13041  // There are some allocations in this block.
13042  else
13043  {
13044  VkDeviceSize offset = 0;
13045  VmaSuballocationList::iterator it;
13046  for(it = pMetadata->m_Suballocations.begin();
13047  it != pMetadata->m_Suballocations.end();
13048  ++it)
13049  {
13050  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13051  VMA_ASSERT(it->offset >= offset);
13052 
13053  // Need to insert preceding free space.
13054  if(it->offset > offset)
13055  {
13056  ++pMetadata->m_FreeCount;
13057  const VkDeviceSize freeSize = it->offset - offset;
13058  VmaSuballocation suballoc = {
13059  offset, // offset
13060  freeSize, // size
13061  VMA_NULL, // hAllocation
13062  VMA_SUBALLOCATION_TYPE_FREE };
13063  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13064  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13065  {
13066  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13067  }
13068  }
13069 
13070  pMetadata->m_SumFreeSize -= it->size;
13071  offset = it->offset + it->size;
13072  }
13073 
13074  // Need to insert trailing free space.
13075  if(offset < blockSize)
13076  {
13077  ++pMetadata->m_FreeCount;
13078  const VkDeviceSize freeSize = blockSize - offset;
13079  VmaSuballocation suballoc = {
13080  offset, // offset
13081  freeSize, // size
13082  VMA_NULL, // hAllocation
13083  VMA_SUBALLOCATION_TYPE_FREE };
13084  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13085  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13086  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13087  {
13088  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13089  }
13090  }
13091 
13092  VMA_SORT(
13093  pMetadata->m_FreeSuballocationsBySize.begin(),
13094  pMetadata->m_FreeSuballocationsBySize.end(),
13095  VmaSuballocationItemSizeLess());
13096  }
13097 
13098  VMA_HEAVY_ASSERT(pMetadata->Validate());
13099  }
13100 }
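/*
PreprocessMetadata() above strips all FREE suballocations so that Defragment() can
operate on allocations alone; PostprocessMetadata() then rebuilds consistent metadata by
walking the remaining allocations in offset order, re-inserting a FREE node into every
gap, and re-sorting m_FreeSuballocationsBySize.
*/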
13101 
13102 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13103 {
13104  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13105  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13106  // Advance to the first suballocation at offset >= suballoc.offset.
13107  while(it != pMetadata->m_Suballocations.end() &&
13108  it->offset < suballoc.offset)
13109  {
13110  ++it;
13111  }
13112 
13113  pMetadata->m_Suballocations.insert(it, suballoc);
13114 }
13115 
13116 ////////////////////////////////////////////////////////////////////////////////
13117 // VmaBlockVectorDefragmentationContext
13118 
13119 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13120  VmaAllocator hAllocator,
13121  VmaPool hCustomPool,
13122  VmaBlockVector* pBlockVector,
13123  uint32_t currFrameIndex,
13124  uint32_t algorithmFlags) :
13125  res(VK_SUCCESS),
13126  mutexLocked(false),
13127  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13128  m_hAllocator(hAllocator),
13129  m_hCustomPool(hCustomPool),
13130  m_pBlockVector(pBlockVector),
13131  m_CurrFrameIndex(currFrameIndex),
13132  m_AlgorithmFlags(algorithmFlags),
13133  m_pAlgorithm(VMA_NULL),
13134  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13135  m_AllAllocations(false)
13136 {
13137 }
13138 
13139 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13140 {
13141  vma_delete(m_hAllocator, m_pAlgorithm);
13142 }
13143 
13144 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13145 {
13146  AllocInfo info = { hAlloc, pChanged };
13147  m_Allocations.push_back(info);
13148 }
13149 
13150 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13151 {
13152  const bool allAllocations = m_AllAllocations ||
13153  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13154 
13155  /********************************
13156  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13157  ********************************/
13158 
13159  /*
13160  Fast algorithm is supported only when certain criteria are met:
13161  - VMA_DEBUG_MARGIN is 0.
13162  - All allocations in this block vector are moveable.
13163  - There is no possibility of image/buffer granularity conflict.
13164  */
13165  if(VMA_DEBUG_MARGIN == 0 &&
13166  allAllocations &&
13167  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13168  {
13169  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13170  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13171  }
13172  else
13173  {
13174  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13175  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13176  }
13177 
13178  if(allAllocations)
13179  {
13180  m_pAlgorithm->AddAll();
13181  }
13182  else
13183  {
13184  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13185  {
13186  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13187  }
13188  }
13189 }
13190 
13191 ////////////////////////////////////////////////////////////////////////////////
13192 // VmaDefragmentationContext
13193 
13194 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13195  VmaAllocator hAllocator,
13196  uint32_t currFrameIndex,
13197  uint32_t flags,
13198  VmaDefragmentationStats* pStats) :
13199  m_hAllocator(hAllocator),
13200  m_CurrFrameIndex(currFrameIndex),
13201  m_Flags(flags),
13202  m_pStats(pStats),
13203  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13204 {
13205  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13206 }
13207 
13208 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13209 {
13210  for(size_t i = m_CustomPoolContexts.size(); i--; )
13211  {
13212  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13213  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13214  vma_delete(m_hAllocator, pBlockVectorCtx);
13215  }
13216  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13217  {
13218  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13219  if(pBlockVectorCtx)
13220  {
13221  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13222  vma_delete(m_hAllocator, pBlockVectorCtx);
13223  }
13224  }
13225 }
13226 
13227 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13228 {
13229  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13230  {
13231  VmaPool pool = pPools[poolIndex];
13232  VMA_ASSERT(pool);
13233  // Pools with algorithm other than default are not defragmented.
13234  if(pool->m_BlockVector.GetAlgorithm() == 0)
13235  {
13236  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13237 
13238  for(size_t i = m_CustomPoolContexts.size(); i--; )
13239  {
13240  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13241  {
13242  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13243  break;
13244  }
13245  }
13246 
13247  if(!pBlockVectorDefragCtx)
13248  {
13249  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13250  m_hAllocator,
13251  pool,
13252  &pool->m_BlockVector,
13253  m_CurrFrameIndex,
13254  m_Flags);
13255  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13256  }
13257 
13258  pBlockVectorDefragCtx->AddAll();
13259  }
13260  }
13261 }
13262 
13263 void VmaDefragmentationContext_T::AddAllocations(
13264  uint32_t allocationCount,
13265  VmaAllocation* pAllocations,
13266  VkBool32* pAllocationsChanged)
13267 {
13268  // Dispatch pAllocations among defragmentators. Create them when necessary.
13269  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13270  {
13271  const VmaAllocation hAlloc = pAllocations[allocIndex];
13272  VMA_ASSERT(hAlloc);
13273  // DedicatedAlloc cannot be defragmented.
13274  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13275  // Lost allocation cannot be defragmented.
13276  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13277  {
13278  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13279 
13280  const VmaPool hAllocPool = hAlloc->GetPool();
13281  // This allocation belongs to custom pool.
13282  if(hAllocPool != VK_NULL_HANDLE)
13283  {
13284  // Pools with algorithm other than default are not defragmented.
13285  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13286  {
13287  for(size_t i = m_CustomPoolContexts.size(); i--; )
13288  {
13289  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13290  {
13291  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13292  break;
13293  }
13294  }
13295  if(!pBlockVectorDefragCtx)
13296  {
13297  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13298  m_hAllocator,
13299  hAllocPool,
13300  &hAllocPool->m_BlockVector,
13301  m_CurrFrameIndex,
13302  m_Flags);
13303  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13304  }
13305  }
13306  }
13307  // This allocation belongs to default pool.
13308  else
13309  {
13310  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13311  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13312  if(!pBlockVectorDefragCtx)
13313  {
13314  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13315  m_hAllocator,
13316  VMA_NULL, // hCustomPool
13317  m_hAllocator->m_pBlockVectors[memTypeIndex],
13318  m_CurrFrameIndex,
13319  m_Flags);
13320  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13321  }
13322  }
13323 
13324  if(pBlockVectorDefragCtx)
13325  {
13326  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13327  &pAllocationsChanged[allocIndex] : VMA_NULL;
13328  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13329  }
13330  }
13331  }
13332 }
13333 
13334 VkResult VmaDefragmentationContext_T::Defragment(
13335  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13336  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13337  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13338 {
13339  if(pStats)
13340  {
13341  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13342  }
13343 
13344  if(commandBuffer == VK_NULL_HANDLE)
13345  {
13346  maxGpuBytesToMove = 0;
13347  maxGpuAllocationsToMove = 0;
13348  }
13349 
13350  VkResult res = VK_SUCCESS;
13351 
13352  // Process default pools.
13353  for(uint32_t memTypeIndex = 0;
13354  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13355  ++memTypeIndex)
13356  {
13357  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13358  if(pBlockVectorCtx)
13359  {
13360  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13361  pBlockVectorCtx->GetBlockVector()->Defragment(
13362  pBlockVectorCtx,
13363  pStats,
13364  maxCpuBytesToMove, maxCpuAllocationsToMove,
13365  maxGpuBytesToMove, maxGpuAllocationsToMove,
13366  commandBuffer);
13367  if(pBlockVectorCtx->res != VK_SUCCESS)
13368  {
13369  res = pBlockVectorCtx->res;
13370  }
13371  }
13372  }
13373 
13374  // Process custom pools.
13375  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13376  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13377  ++customCtxIndex)
13378  {
13379  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13380  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13381  pBlockVectorCtx->GetBlockVector()->Defragment(
13382  pBlockVectorCtx,
13383  pStats,
13384  maxCpuBytesToMove, maxCpuAllocationsToMove,
13385  maxGpuBytesToMove, maxGpuAllocationsToMove,
13386  commandBuffer);
13387  if(pBlockVectorCtx->res != VK_SUCCESS)
13388  {
13389  res = pBlockVectorCtx->res;
13390  }
13391  }
13392 
13393  return res;
13394 }
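/*
A minimal usage sketch of this defragmentation entry point through the public
API. `allocator`, `allocs` and `allocCount` are assumed to exist and are
illustrative names, not part of the library:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // GPU moves disabled, as enforced above.

    VmaDefragmentationContext defragCtx;
    VmaDefragmentationStats stats;
    vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/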
13395 
13396 ////////////////////////////////////////////////////////////////////////////////
13397 // VmaRecorder
13398 
13399 #if VMA_RECORDING_ENABLED
13400 
13401 VmaRecorder::VmaRecorder() :
13402  m_UseMutex(true),
13403  m_Flags(0),
13404  m_File(VMA_NULL),
13405  m_Freq(INT64_MAX),
13406  m_StartCounter(INT64_MAX)
13407 {
13408 }
13409 
13410 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13411 {
13412  m_UseMutex = useMutex;
13413  m_Flags = settings.flags;
13414 
13415  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13416  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13417 
13418  // Open file for writing.
13419  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13420  if(err != 0)
13421  {
13422  return VK_ERROR_INITIALIZATION_FAILED;
13423  }
13424 
13425  // Write header.
13426  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13427  fprintf(m_File, "%s\n", "1,5");
13428 
13429  return VK_SUCCESS;
13430 }
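/*
A minimal sketch of turning this recorder on from user code, via
VmaAllocatorCreateInfo::pRecordSettings. It only takes effect when
VMA_RECORDING_ENABLED is 1 (Windows by default). The file path is illustrative:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
    recordSettings.pFilePath = "vma_calls.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
    allocatorInfo.device = device;                 // assumed to exist
    allocatorInfo.pRecordSettings = &recordSettings;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/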
13431 
13432 VmaRecorder::~VmaRecorder()
13433 {
13434  if(m_File != VMA_NULL)
13435  {
13436  fclose(m_File);
13437  }
13438 }
13439 
13440 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13441 {
13442  CallParams callParams;
13443  GetBasicParams(callParams);
13444 
13445  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13446  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13447  Flush();
13448 }
13449 
13450 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13451 {
13452  CallParams callParams;
13453  GetBasicParams(callParams);
13454 
13455  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13456  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13457  Flush();
13458 }
13459 
13460 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13461 {
13462  CallParams callParams;
13463  GetBasicParams(callParams);
13464 
13465  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13466  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13467  createInfo.memoryTypeIndex,
13468  createInfo.flags,
13469  createInfo.blockSize,
13470  (uint64_t)createInfo.minBlockCount,
13471  (uint64_t)createInfo.maxBlockCount,
13472  createInfo.frameInUseCount,
13473  pool);
13474  Flush();
13475 }
13476 
13477 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13478 {
13479  CallParams callParams;
13480  GetBasicParams(callParams);
13481 
13482  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13483  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13484  pool);
13485  Flush();
13486 }
13487 
13488 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13489  const VkMemoryRequirements& vkMemReq,
13490  const VmaAllocationCreateInfo& createInfo,
13491  VmaAllocation allocation)
13492 {
13493  CallParams callParams;
13494  GetBasicParams(callParams);
13495 
13496  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13497  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13498  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13499  vkMemReq.size,
13500  vkMemReq.alignment,
13501  vkMemReq.memoryTypeBits,
13502  createInfo.flags,
13503  createInfo.usage,
13504  createInfo.requiredFlags,
13505  createInfo.preferredFlags,
13506  createInfo.memoryTypeBits,
13507  createInfo.pool,
13508  allocation,
13509  userDataStr.GetString());
13510  Flush();
13511 }
13512 
13513 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13514  const VkMemoryRequirements& vkMemReq,
13515  const VmaAllocationCreateInfo& createInfo,
13516  uint64_t allocationCount,
13517  const VmaAllocation* pAllocations)
13518 {
13519  CallParams callParams;
13520  GetBasicParams(callParams);
13521 
13522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13523  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13524  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13525  vkMemReq.size,
13526  vkMemReq.alignment,
13527  vkMemReq.memoryTypeBits,
13528  createInfo.flags,
13529  createInfo.usage,
13530  createInfo.requiredFlags,
13531  createInfo.preferredFlags,
13532  createInfo.memoryTypeBits,
13533  createInfo.pool);
13534  PrintPointerList(allocationCount, pAllocations);
13535  fprintf(m_File, ",%s\n", userDataStr.GetString());
13536  Flush();
13537 }
13538 
13539 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13540  const VkMemoryRequirements& vkMemReq,
13541  bool requiresDedicatedAllocation,
13542  bool prefersDedicatedAllocation,
13543  const VmaAllocationCreateInfo& createInfo,
13544  VmaAllocation allocation)
13545 {
13546  CallParams callParams;
13547  GetBasicParams(callParams);
13548 
13549  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13550  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13551  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13552  vkMemReq.size,
13553  vkMemReq.alignment,
13554  vkMemReq.memoryTypeBits,
13555  requiresDedicatedAllocation ? 1 : 0,
13556  prefersDedicatedAllocation ? 1 : 0,
13557  createInfo.flags,
13558  createInfo.usage,
13559  createInfo.requiredFlags,
13560  createInfo.preferredFlags,
13561  createInfo.memoryTypeBits,
13562  createInfo.pool,
13563  allocation,
13564  userDataStr.GetString());
13565  Flush();
13566 }
13567 
13568 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13569  const VkMemoryRequirements& vkMemReq,
13570  bool requiresDedicatedAllocation,
13571  bool prefersDedicatedAllocation,
13572  const VmaAllocationCreateInfo& createInfo,
13573  VmaAllocation allocation)
13574 {
13575  CallParams callParams;
13576  GetBasicParams(callParams);
13577 
13578  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13579  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13580  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13581  vkMemReq.size,
13582  vkMemReq.alignment,
13583  vkMemReq.memoryTypeBits,
13584  requiresDedicatedAllocation ? 1 : 0,
13585  prefersDedicatedAllocation ? 1 : 0,
13586  createInfo.flags,
13587  createInfo.usage,
13588  createInfo.requiredFlags,
13589  createInfo.preferredFlags,
13590  createInfo.memoryTypeBits,
13591  createInfo.pool,
13592  allocation,
13593  userDataStr.GetString());
13594  Flush();
13595 }
13596 
13597 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13598  VmaAllocation allocation)
13599 {
13600  CallParams callParams;
13601  GetBasicParams(callParams);
13602 
13603  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13604  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13605  allocation);
13606  Flush();
13607 }
13608 
13609 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13610  uint64_t allocationCount,
13611  const VmaAllocation* pAllocations)
13612 {
13613  CallParams callParams;
13614  GetBasicParams(callParams);
13615 
13616  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13617  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13618  PrintPointerList(allocationCount, pAllocations);
13619  fprintf(m_File, "\n");
13620  Flush();
13621 }
13622 
13623 void VmaRecorder::RecordResizeAllocation(
13624  uint32_t frameIndex,
13625  VmaAllocation allocation,
13626  VkDeviceSize newSize)
13627 {
13628  CallParams callParams;
13629  GetBasicParams(callParams);
13630 
13631  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13632  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13633  allocation, newSize);
13634  Flush();
13635 }
13636 
13637 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13638  VmaAllocation allocation,
13639  const void* pUserData)
13640 {
13641  CallParams callParams;
13642  GetBasicParams(callParams);
13643 
13644  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13645  UserDataString userDataStr(
13646  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13647  pUserData);
13648  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13649  allocation,
13650  userDataStr.GetString());
13651  Flush();
13652 }
13653 
13654 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13655  VmaAllocation allocation)
13656 {
13657  CallParams callParams;
13658  GetBasicParams(callParams);
13659 
13660  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13661  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13662  allocation);
13663  Flush();
13664 }
13665 
13666 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13667  VmaAllocation allocation)
13668 {
13669  CallParams callParams;
13670  GetBasicParams(callParams);
13671 
13672  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13673  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13674  allocation);
13675  Flush();
13676 }
13677 
13678 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13679  VmaAllocation allocation)
13680 {
13681  CallParams callParams;
13682  GetBasicParams(callParams);
13683 
13684  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13685  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13686  allocation);
13687  Flush();
13688 }
13689 
13690 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13691  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13692 {
13693  CallParams callParams;
13694  GetBasicParams(callParams);
13695 
13696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13697  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13698  allocation,
13699  offset,
13700  size);
13701  Flush();
13702 }
13703 
13704 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13705  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13706 {
13707  CallParams callParams;
13708  GetBasicParams(callParams);
13709 
13710  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13711  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13712  allocation,
13713  offset,
13714  size);
13715  Flush();
13716 }
13717 
13718 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13719  const VkBufferCreateInfo& bufCreateInfo,
13720  const VmaAllocationCreateInfo& allocCreateInfo,
13721  VmaAllocation allocation)
13722 {
13723  CallParams callParams;
13724  GetBasicParams(callParams);
13725 
13726  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13727  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13728  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13729  bufCreateInfo.flags,
13730  bufCreateInfo.size,
13731  bufCreateInfo.usage,
13732  bufCreateInfo.sharingMode,
13733  allocCreateInfo.flags,
13734  allocCreateInfo.usage,
13735  allocCreateInfo.requiredFlags,
13736  allocCreateInfo.preferredFlags,
13737  allocCreateInfo.memoryTypeBits,
13738  allocCreateInfo.pool,
13739  allocation,
13740  userDataStr.GetString());
13741  Flush();
13742 }
13743 
13744 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13745  const VkImageCreateInfo& imageCreateInfo,
13746  const VmaAllocationCreateInfo& allocCreateInfo,
13747  VmaAllocation allocation)
13748 {
13749  CallParams callParams;
13750  GetBasicParams(callParams);
13751 
13752  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13753  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13754  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13755  imageCreateInfo.flags,
13756  imageCreateInfo.imageType,
13757  imageCreateInfo.format,
13758  imageCreateInfo.extent.width,
13759  imageCreateInfo.extent.height,
13760  imageCreateInfo.extent.depth,
13761  imageCreateInfo.mipLevels,
13762  imageCreateInfo.arrayLayers,
13763  imageCreateInfo.samples,
13764  imageCreateInfo.tiling,
13765  imageCreateInfo.usage,
13766  imageCreateInfo.sharingMode,
13767  imageCreateInfo.initialLayout,
13768  allocCreateInfo.flags,
13769  allocCreateInfo.usage,
13770  allocCreateInfo.requiredFlags,
13771  allocCreateInfo.preferredFlags,
13772  allocCreateInfo.memoryTypeBits,
13773  allocCreateInfo.pool,
13774  allocation,
13775  userDataStr.GetString());
13776  Flush();
13777 }
13778 
13779 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13780  VmaAllocation allocation)
13781 {
13782  CallParams callParams;
13783  GetBasicParams(callParams);
13784 
13785  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13786  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13787  allocation);
13788  Flush();
13789 }
13790 
13791 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13792  VmaAllocation allocation)
13793 {
13794  CallParams callParams;
13795  GetBasicParams(callParams);
13796 
13797  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13798  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13799  allocation);
13800  Flush();
13801 }
13802 
13803 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13804  VmaAllocation allocation)
13805 {
13806  CallParams callParams;
13807  GetBasicParams(callParams);
13808 
13809  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13810  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13811  allocation);
13812  Flush();
13813 }
13814 
13815 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13816  VmaAllocation allocation)
13817 {
13818  CallParams callParams;
13819  GetBasicParams(callParams);
13820 
13821  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13822  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13823  allocation);
13824  Flush();
13825 }
13826 
13827 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13828  VmaPool pool)
13829 {
13830  CallParams callParams;
13831  GetBasicParams(callParams);
13832 
13833  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13834  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13835  pool);
13836  Flush();
13837 }
13838 
13839 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13840  const VmaDefragmentationInfo2& info,
13841  VmaDefragmentationContext ctx)
13842 {
13843  CallParams callParams;
13844  GetBasicParams(callParams);
13845 
13846  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13847  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13848  info.flags);
13849  PrintPointerList(info.allocationCount, info.pAllocations);
13850  fprintf(m_File, ",");
13851  PrintPointerList(info.poolCount, info.pPools);
13852  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13853  info.maxCpuBytesToMove,
13854  info.maxCpuAllocationsToMove,
13855  info.maxGpuBytesToMove,
13856  info.maxGpuAllocationsToMove,
13857  info.commandBuffer,
13858  ctx);
13859  Flush();
13860 }
13861 
13862 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13863  VmaDefragmentationContext ctx)
13864 {
13865  CallParams callParams;
13866  GetBasicParams(callParams);
13867 
13868  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13869  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13870  ctx);
13871  Flush();
13872 }
13873 
13874 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13875 {
13876  if(pUserData != VMA_NULL)
13877  {
13878  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13879  {
13880  m_Str = (const char*)pUserData;
13881  }
13882  else
13883  {
13884  sprintf_s(m_PtrStr, "%p", pUserData);
13885  m_Str = m_PtrStr;
13886  }
13887  }
13888  else
13889  {
13890  m_Str = "";
13891  }
13892 }
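/*
A short sketch of the two pUserData conventions this helper distinguishes,
assuming `allocator` and `allocation` already exist:

    // Default: pUserData is an opaque pointer, recorded as "%p".
    vmaSetAllocationUserData(allocator, allocation, &myObject);

    // If the allocation was created with
    // VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, pUserData is treated
    // as a null-terminated string and copied by the library:
    vmaSetAllocationUserData(allocator, allocation, (void*)"grass texture");
*/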
13893 
13894 void VmaRecorder::WriteConfiguration(
13895  const VkPhysicalDeviceProperties& devProps,
13896  const VkPhysicalDeviceMemoryProperties& memProps,
13897  bool dedicatedAllocationExtensionEnabled)
13898 {
13899  fprintf(m_File, "Config,Begin\n");
13900 
13901  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13902  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13903  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13904  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13905  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13906  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13907 
13908  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13909  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13910  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13911 
13912  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13913  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13914  {
13915  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13916  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13917  }
13918  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13919  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13920  {
13921  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13922  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13923  }
13924 
13925  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13926 
13927  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13928  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13929  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13930  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13931  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13932  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13933  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13934  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13935  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13936 
13937  fprintf(m_File, "Config,End\n");
13938 }
13939 
13940 void VmaRecorder::GetBasicParams(CallParams& outParams)
13941 {
13942  outParams.threadId = GetCurrentThreadId();
13943 
13944  LARGE_INTEGER counter;
13945  QueryPerformanceCounter(&counter);
13946  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13947 }
13948 
13949 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
13950 {
13951  if(count)
13952  {
13953  fprintf(m_File, "%p", pItems[0]);
13954  for(uint64_t i = 1; i < count; ++i)
13955  {
13956  fprintf(m_File, " %p", pItems[i]);
13957  }
13958  }
13959 }
13960 
13961 void VmaRecorder::Flush()
13962 {
13963  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13964  {
13965  fflush(m_File);
13966  }
13967 }
13968 
13969 #endif // #if VMA_RECORDING_ENABLED
13970 
13971 ////////////////////////////////////////////////////////////////////////////////
13972 // VmaAllocator_T
13973 
13974 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13975  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13976  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13977  m_hDevice(pCreateInfo->device),
13978  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13979  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13980  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13981  m_PreferredLargeHeapBlockSize(0),
13982  m_PhysicalDevice(pCreateInfo->physicalDevice),
13983  m_CurrentFrameIndex(0),
13984  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13985  m_NextPoolId(0)
13986 #if VMA_RECORDING_ENABLED
13987  ,m_pRecorder(VMA_NULL)
13988 #endif
13989 {
13990  if(VMA_DEBUG_DETECT_CORRUPTION)
13991  {
13992  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13993  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13994  }
13995 
13996  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13997 
13998 #if !(VMA_DEDICATED_ALLOCATION)
13999  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14000  {
14001  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14002  }
14003 #endif
14004 
14005  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14006  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14007  memset(&m_MemProps, 0, sizeof(m_MemProps));
14008 
14009  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14010  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14011 
14012  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14013  {
14014  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14015  }
14016 
14017  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14018  {
14019  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14020  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14021  }
14022 
14023  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14024 
14025  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14026  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14027 
14028  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14029  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14030  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14031  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14032 
14033  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14034  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14035 
14036  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14037  {
14038  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14039  {
14040  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14041  if(limit != VK_WHOLE_SIZE)
14042  {
14043  m_HeapSizeLimit[heapIndex] = limit;
14044  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14045  {
14046  m_MemProps.memoryHeaps[heapIndex].size = limit;
14047  }
14048  }
14049  }
14050  }
14051 
14052  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14053  {
14054  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14055 
14056  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14057  this,
14058  memTypeIndex,
14059  preferredBlockSize,
14060  0,
14061  SIZE_MAX,
14062  GetBufferImageGranularity(),
14063  pCreateInfo->frameInUseCount,
14064  false, // isCustomPool
14065  false, // explicitBlockSize
14066  false); // linearAlgorithm
14067  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14068  // because minBlockCount is 0.
14069  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14070 
14071  }
14072 }
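/*
A minimal sketch of the pHeapSizeLimit handling above: capping a heap makes the
allocator behave as if the device had less memory. Heap index and size below
are illustrative:

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means no limit.
    }
    heapLimits[0] = 512ull * 1024 * 1024; // Cap heap 0 at 512 MiB.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
    allocatorInfo.device = device;                 // assumed to exist
    allocatorInfo.pHeapSizeLimit = heapLimits;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/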
14073 
14074 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14075 {
14076  VkResult res = VK_SUCCESS;
14077 
14078  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14079  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14080  {
14081 #if VMA_RECORDING_ENABLED
14082  m_pRecorder = vma_new(this, VmaRecorder)();
14083  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14084  if(res != VK_SUCCESS)
14085  {
14086  return res;
14087  }
14088  m_pRecorder->WriteConfiguration(
14089  m_PhysicalDeviceProperties,
14090  m_MemProps,
14091  m_UseKhrDedicatedAllocation);
14092  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14093 #else
14094  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14095  return VK_ERROR_FEATURE_NOT_PRESENT;
14096 #endif
14097  }
14098 
14099  return res;
14100 }
14101 
14102 VmaAllocator_T::~VmaAllocator_T()
14103 {
14104 #if VMA_RECORDING_ENABLED
14105  if(m_pRecorder != VMA_NULL)
14106  {
14107  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14108  vma_delete(this, m_pRecorder);
14109  }
14110 #endif
14111 
14112  VMA_ASSERT(m_Pools.empty());
14113 
14114  for(size_t i = GetMemoryTypeCount(); i--; )
14115  {
14116  vma_delete(this, m_pDedicatedAllocations[i]);
14117  vma_delete(this, m_pBlockVectors[i]);
14118  }
14119 }
14120 
14121 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14122 {
14123 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14124  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
14125  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
14126  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
14127  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
14128  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
14129  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
14130  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
14131  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
14132  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
14133  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
14134  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
14135  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
14136  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
14137  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
14138  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
14139  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
14140  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
14141 #if VMA_DEDICATED_ALLOCATION
14142  if(m_UseKhrDedicatedAllocation)
14143  {
14144  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14145  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14146  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14147  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14148  }
14149 #endif // #if VMA_DEDICATED_ALLOCATION
14150 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14151 
14152 #define VMA_COPY_IF_NOT_NULL(funcName) \
14153  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14154 
14155  if(pVulkanFunctions != VMA_NULL)
14156  {
14157  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14158  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14159  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14160  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14161  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14162  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14163  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14164  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14165  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14166  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14167  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14168  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14169  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14170  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14171  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14172  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14173  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14174 #if VMA_DEDICATED_ALLOCATION
14175  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14176  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14177 #endif
14178  }
14179 
14180 #undef VMA_COPY_IF_NOT_NULL
14181 
14182  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14183  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14184  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14185  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14186  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14187  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14188  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14189  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14190  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14191  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14192  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14193  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14194  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14195  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14196  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14197  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14198  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14199  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14200  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14201 #if VMA_DEDICATED_ALLOCATION
14202  if(m_UseKhrDedicatedAllocation)
14203  {
14204  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14205  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14206  }
14207 #endif
14208 }
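/*
A minimal sketch of supplying these pointers explicitly, which becomes
mandatory when VMA_STATIC_VULKAN_FUNCTIONS is defined to 0. Only a few members
are shown; every member of VmaVulkanFunctions must be set the same way:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ... and so on for the remaining members.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
    allocatorInfo.device = device;                 // assumed to exist
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/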
14209 
14210 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14211 {
14212  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14213  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14214  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14215  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14216 }
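/*
Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE (1 GiB) and
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB):

    heapSize = 256 MiB -> small heap -> preferred block size = 256 MiB / 8 = 32 MiB
    heapSize =   8 GiB -> large heap -> preferred block size = 256 MiB
*/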
14217 
14218 VkResult VmaAllocator_T::AllocateMemoryOfType(
14219  VkDeviceSize size,
14220  VkDeviceSize alignment,
14221  bool dedicatedAllocation,
14222  VkBuffer dedicatedBuffer,
14223  VkImage dedicatedImage,
14224  const VmaAllocationCreateInfo& createInfo,
14225  uint32_t memTypeIndex,
14226  VmaSuballocationType suballocType,
14227  size_t allocationCount,
14228  VmaAllocation* pAllocations)
14229 {
14230  VMA_ASSERT(pAllocations != VMA_NULL);
14231  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14232 
14233  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14234 
14235  // If memory type is not HOST_VISIBLE, disable MAPPED.
14236  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14237  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14238  {
14239  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14240  }
14241 
14242  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14243  VMA_ASSERT(blockVector);
14244 
14245  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14246  bool preferDedicatedMemory =
14247  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14248  dedicatedAllocation ||
14249  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14250  size > preferredBlockSize / 2;
14251 
14252  if(preferDedicatedMemory &&
14253  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14254  finalCreateInfo.pool == VK_NULL_HANDLE)
14255  {
14256  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14257  }
14258 
14259  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14260  {
14261  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14262  {
14263  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14264  }
14265  else
14266  {
14267  return AllocateDedicatedMemory(
14268  size,
14269  suballocType,
14270  memTypeIndex,
14271  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14272  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14273  finalCreateInfo.pUserData,
14274  dedicatedBuffer,
14275  dedicatedImage,
14276  allocationCount,
14277  pAllocations);
14278  }
14279  }
14280  else
14281  {
14282  VkResult res = blockVector->Allocate(
14283  VK_NULL_HANDLE, // hCurrentPool
14284  m_CurrentFrameIndex.load(),
14285  size,
14286  alignment,
14287  finalCreateInfo,
14288  suballocType,
14289  allocationCount,
14290  pAllocations);
14291  if(res == VK_SUCCESS)
14292  {
14293  return res;
14294  }
14295 
14296  // Try dedicated memory.
14297  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14298  {
14299  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14300  }
14301  else
14302  {
14303  res = AllocateDedicatedMemory(
14304  size,
14305  suballocType,
14306  memTypeIndex,
14307  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14308  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14309  finalCreateInfo.pUserData,
14310  dedicatedBuffer,
14311  dedicatedImage,
14312  allocationCount,
14313  pAllocations);
14314  if(res == VK_SUCCESS)
14315  {
14316  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14317  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14318  return VK_SUCCESS;
14319  }
14320  else
14321  {
14322  // Everything failed: Return error code.
14323  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14324  return res;
14325  }
14326  }
14327  }
14328 }
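/*
A minimal sketch of how the flags tested above map to user code. `allocator`
and `memReq` (a VkMemoryRequirements for some resource) are assumed to exist:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Force a dedicated VkDeviceMemory block, skipping the block-vector path:
    createInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    // Or, conversely, forbid any new vkAllocateMemory call:
    //createInfo.flags = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &alloc, nullptr);
*/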
14329 
14330 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14331  VkDeviceSize size,
14332  VmaSuballocationType suballocType,
14333  uint32_t memTypeIndex,
14334  bool map,
14335  bool isUserDataString,
14336  void* pUserData,
14337  VkBuffer dedicatedBuffer,
14338  VkImage dedicatedImage,
14339  size_t allocationCount,
14340  VmaAllocation* pAllocations)
14341 {
14342  VMA_ASSERT(allocationCount > 0 && pAllocations);
14343 
14344  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14345  allocInfo.memoryTypeIndex = memTypeIndex;
14346  allocInfo.allocationSize = size;
14347 
14348 #if VMA_DEDICATED_ALLOCATION
14349  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14350  if(m_UseKhrDedicatedAllocation)
14351  {
14352  if(dedicatedBuffer != VK_NULL_HANDLE)
14353  {
14354  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14355  dedicatedAllocInfo.buffer = dedicatedBuffer;
14356  allocInfo.pNext = &dedicatedAllocInfo;
14357  }
14358  else if(dedicatedImage != VK_NULL_HANDLE)
14359  {
14360  dedicatedAllocInfo.image = dedicatedImage;
14361  allocInfo.pNext = &dedicatedAllocInfo;
14362  }
14363  }
14364 #endif // #if VMA_DEDICATED_ALLOCATION
14365 
14366  size_t allocIndex;
14367  VkResult res = VK_SUCCESS;
14368  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14369  {
14370  res = AllocateDedicatedMemoryPage(
14371  size,
14372  suballocType,
14373  memTypeIndex,
14374  allocInfo,
14375  map,
14376  isUserDataString,
14377  pUserData,
14378  pAllocations + allocIndex);
14379  if(res != VK_SUCCESS)
14380  {
14381  break;
14382  }
14383  }
14384 
14385  if(res == VK_SUCCESS)
14386  {
14387  // Register them in m_pDedicatedAllocations.
14388  {
14389  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14390  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14391  VMA_ASSERT(pDedicatedAllocations);
14392  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14393  {
14394  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14395  }
14396  }
14397 
14398  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14399  }
14400  else
14401  {
14402  // Free all already created allocations.
14403  while(allocIndex--)
14404  {
14405  VmaAllocation currAlloc = pAllocations[allocIndex];
14406  VkDeviceMemory hMemory = currAlloc->GetMemory();
14407 
14408  /*
14409  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14410  before vkFreeMemory.
14411 
14412  if(currAlloc->GetMappedData() != VMA_NULL)
14413  {
14414  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14415  }
14416  */
14417 
14418  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14419 
14420  currAlloc->SetUserData(this, VMA_NULL);
14421  vma_delete(this, currAlloc);
14422  }
14423 
14424  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14425  }
14426 
14427  return res;
14428 }
14429 
14430 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14431  VkDeviceSize size,
14432  VmaSuballocationType suballocType,
14433  uint32_t memTypeIndex,
14434  const VkMemoryAllocateInfo& allocInfo,
14435  bool map,
14436  bool isUserDataString,
14437  void* pUserData,
14438  VmaAllocation* pAllocation)
14439 {
14440  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14441  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14442  if(res < 0)
14443  {
14444  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14445  return res;
14446  }
14447 
14448  void* pMappedData = VMA_NULL;
14449  if(map)
14450  {
14451  res = (*m_VulkanFunctions.vkMapMemory)(
14452  m_hDevice,
14453  hMemory,
14454  0,
14455  VK_WHOLE_SIZE,
14456  0,
14457  &pMappedData);
14458  if(res < 0)
14459  {
14460  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14461  FreeVulkanMemory(memTypeIndex, size, hMemory);
14462  return res;
14463  }
14464  }
14465 
14466  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14467  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14468  (*pAllocation)->SetUserData(this, pUserData);
14469  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14470  {
14471  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14472  }
14473 
14474  return VK_SUCCESS;
14475 }
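/*
A minimal sketch of the persistently mapped path handled above (map == true),
seen from the user side. `allocator`, `memReq`, `srcData` and `srcSize` are
assumed to exist:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    createInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    if(vmaAllocateMemory(allocator, &memReq, &createInfo, &alloc, &allocInfo) == VK_SUCCESS)
    {
        // pMappedData stays valid for the whole lifetime of the allocation.
        memcpy(allocInfo.pMappedData, srcData, srcSize);
    }
*/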
14476 
14477 void VmaAllocator_T::GetBufferMemoryRequirements(
14478  VkBuffer hBuffer,
14479  VkMemoryRequirements& memReq,
14480  bool& requiresDedicatedAllocation,
14481  bool& prefersDedicatedAllocation) const
14482 {
14483 #if VMA_DEDICATED_ALLOCATION
14484  if(m_UseKhrDedicatedAllocation)
14485  {
14486  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14487  memReqInfo.buffer = hBuffer;
14488 
14489  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14490 
14491  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14492  memReq2.pNext = &memDedicatedReq;
14493 
14494  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14495 
14496  memReq = memReq2.memoryRequirements;
14497  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14498  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14499  }
14500  else
14501 #endif // #if VMA_DEDICATED_ALLOCATION
14502  {
14503  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14504  requiresDedicatedAllocation = false;
14505  prefersDedicatedAllocation = false;
14506  }
14507 }
14508 
14509 void VmaAllocator_T::GetImageMemoryRequirements(
14510  VkImage hImage,
14511  VkMemoryRequirements& memReq,
14512  bool& requiresDedicatedAllocation,
14513  bool& prefersDedicatedAllocation) const
14514 {
14515 #if VMA_DEDICATED_ALLOCATION
14516  if(m_UseKhrDedicatedAllocation)
14517  {
14518  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14519  memReqInfo.image = hImage;
14520 
14521  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14522 
14523  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14524  memReq2.pNext = &memDedicatedReq;
14525 
14526  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14527 
14528  memReq = memReq2.memoryRequirements;
14529  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14530  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14531  }
14532  else
14533 #endif // #if VMA_DEDICATED_ALLOCATION
14534  {
14535  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14536  requiresDedicatedAllocation = false;
14537  prefersDedicatedAllocation = false;
14538  }
14539 }
14540 
14541 VkResult VmaAllocator_T::AllocateMemory(
14542  const VkMemoryRequirements& vkMemReq,
14543  bool requiresDedicatedAllocation,
14544  bool prefersDedicatedAllocation,
14545  VkBuffer dedicatedBuffer,
14546  VkImage dedicatedImage,
14547  const VmaAllocationCreateInfo& createInfo,
14548  VmaSuballocationType suballocType,
14549  size_t allocationCount,
14550  VmaAllocation* pAllocations)
14551 {
14552  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14553 
14554  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14555 
14556  if(vkMemReq.size == 0)
14557  {
14558  return VK_ERROR_VALIDATION_FAILED_EXT;
14559  }
14560  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14561  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14562  {
14563  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14564  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14565  }
14566  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14567  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14568  {
14569  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14570  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14571  }
14572  if(requiresDedicatedAllocation)
14573  {
14574  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14575  {
14576  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14577  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14578  }
14579  if(createInfo.pool != VK_NULL_HANDLE)
14580  {
14581  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14582  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14583  }
14584  }
14585  if((createInfo.pool != VK_NULL_HANDLE) &&
14586  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14587  {
14588  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14589  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14590  }
14591 
14592  if(createInfo.pool != VK_NULL_HANDLE)
14593  {
14594  const VkDeviceSize alignmentForPool = VMA_MAX(
14595  vkMemReq.alignment,
14596  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14597  return createInfo.pool->m_BlockVector.Allocate(
14598  createInfo.pool,
14599  m_CurrentFrameIndex.load(),
14600  vkMemReq.size,
14601  alignmentForPool,
14602  createInfo,
14603  suballocType,
14604  allocationCount,
14605  pAllocations);
14606  }
14607  else
14608  {
14609  // Bit mask of Vulkan memory types acceptable for this allocation.
14610  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14611  uint32_t memTypeIndex = UINT32_MAX;
14612  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14613  if(res == VK_SUCCESS)
14614  {
14615  VkDeviceSize alignmentForMemType = VMA_MAX(
14616  vkMemReq.alignment,
14617  GetMemoryTypeMinAlignment(memTypeIndex));
14618 
14619  res = AllocateMemoryOfType(
14620  vkMemReq.size,
14621  alignmentForMemType,
14622  requiresDedicatedAllocation || prefersDedicatedAllocation,
14623  dedicatedBuffer,
14624  dedicatedImage,
14625  createInfo,
14626  memTypeIndex,
14627  suballocType,
14628  allocationCount,
14629  pAllocations);
14630  // Succeeded on first try.
14631  if(res == VK_SUCCESS)
14632  {
14633  return res;
14634  }
14635  // Allocation from this memory type failed. Try other compatible memory types.
14636  else
14637  {
14638  for(;;)
14639  {
14640  // Remove old memTypeIndex from list of possibilities.
14641  memoryTypeBits &= ~(1u << memTypeIndex);
14642  // Find alternative memTypeIndex.
14643  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14644  if(res == VK_SUCCESS)
14645  {
14646  alignmentForMemType = VMA_MAX(
14647  vkMemReq.alignment,
14648  GetMemoryTypeMinAlignment(memTypeIndex));
14649 
14650  res = AllocateMemoryOfType(
14651  vkMemReq.size,
14652  alignmentForMemType,
14653  requiresDedicatedAllocation || prefersDedicatedAllocation,
14654  dedicatedBuffer,
14655  dedicatedImage,
14656  createInfo,
14657  memTypeIndex,
14658  suballocType,
14659  allocationCount,
14660  pAllocations);
14661  // Allocation from this alternative memory type succeeded.
14662  if(res == VK_SUCCESS)
14663  {
14664  return res;
14665  }
14666  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14667  }
14668  // No other matching memory type index could be found.
14669  else
14670  {
14671  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14672  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14673  }
14674  }
14675  }
14676  }
14677  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14678  else
14679  return res;
14680  }
14681 }
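/*
The fallback loop above relies on vmaFindMemoryTypeIndex, which is also a
public function. A minimal sketch, assuming `allocator` and `memoryTypeBits`
(from some VkMemoryRequirements) exist:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, &memTypeIndex);
    // res == VK_ERROR_FEATURE_NOT_PRESENT means no memory type within
    // memoryTypeBits satisfies the requested usage/required flags.
*/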
14682 
14683 void VmaAllocator_T::FreeMemory(
14684  size_t allocationCount,
14685  const VmaAllocation* pAllocations)
14686 {
14687  VMA_ASSERT(pAllocations);
14688 
14689  for(size_t allocIndex = allocationCount; allocIndex--; )
14690  {
14691  VmaAllocation allocation = pAllocations[allocIndex];
14692 
14693  if(allocation != VK_NULL_HANDLE)
14694  {
14695  if(TouchAllocation(allocation))
14696  {
14697  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14698  {
14699  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14700  }
14701 
14702  switch(allocation->GetType())
14703  {
14704  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14705  {
14706  VmaBlockVector* pBlockVector = VMA_NULL;
14707  VmaPool hPool = allocation->GetPool();
14708  if(hPool != VK_NULL_HANDLE)
14709  {
14710  pBlockVector = &hPool->m_BlockVector;
14711  }
14712  else
14713  {
14714  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14715  pBlockVector = m_pBlockVectors[memTypeIndex];
14716  }
14717  pBlockVector->Free(allocation);
14718  }
14719  break;
14720  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14721  FreeDedicatedMemory(allocation);
14722  break;
14723  default:
14724  VMA_ASSERT(0);
14725  }
14726  }
14727 
14728  allocation->SetUserData(this, VMA_NULL);
14729  vma_delete(this, allocation);
14730  }
14731  }
14732 }
14733 
14734 VkResult VmaAllocator_T::ResizeAllocation(
14735  const VmaAllocation alloc,
14736  VkDeviceSize newSize)
14737 {
14738  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14739  {
14740  return VK_ERROR_VALIDATION_FAILED_EXT;
14741  }
14742  if(newSize == alloc->GetSize())
14743  {
14744  return VK_SUCCESS;
14745  }
14746 
14747  switch(alloc->GetType())
14748  {
14749  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14750  return VK_ERROR_FEATURE_NOT_PRESENT;
14751  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14752  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14753  {
14754  alloc->ChangeSize(newSize);
14755  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14756  return VK_SUCCESS;
14757  }
14758  else
14759  {
14760  return VK_ERROR_OUT_OF_POOL_MEMORY;
14761  }
14762  default:
14763  VMA_ASSERT(0);
14764  return VK_ERROR_VALIDATION_FAILED_EXT;
14765  }
14766 }
14767 
14768 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14769 {
14770  // Initialize.
14771  InitStatInfo(pStats->total);
14772  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14773  InitStatInfo(pStats->memoryType[i]);
14774  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14775  InitStatInfo(pStats->memoryHeap[i]);
14776 
14777  // Process default pools.
14778  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14779  {
14780  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14781  VMA_ASSERT(pBlockVector);
14782  pBlockVector->AddStats(pStats);
14783  }
14784 
14785  // Process custom pools.
14786  {
14787  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14788  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14789  {
14790  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14791  }
14792  }
14793 
14794  // Process dedicated allocations.
14795  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14796  {
14797  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14798  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14799  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14800  VMA_ASSERT(pDedicatedAllocVector);
14801  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14802  {
14803  VmaStatInfo allocationStatInfo;
14804  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14805  VmaAddStatInfo(pStats->total, allocationStatInfo);
14806  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14807  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14808  }
14809  }
14810 
14811  // Postprocess.
14812  VmaPostprocessCalcStatInfo(pStats->total);
14813  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14814  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14815  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14816  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14817 }
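/*
A minimal sketch of consuming these statistics through the public API,
assuming `allocator` exists:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used %llu B in %u allocations\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount);
*/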
14818 
14819 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14820 
14821 VkResult VmaAllocator_T::DefragmentationBegin(
14822  const VmaDefragmentationInfo2& info,
14823  VmaDefragmentationStats* pStats,
14824  VmaDefragmentationContext* pContext)
14825 {
14826  if(info.pAllocationsChanged != VMA_NULL)
14827  {
14828  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14829  }
14830 
14831  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14832  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14833 
14834  (*pContext)->AddPools(info.poolCount, info.pPools);
14835  (*pContext)->AddAllocations(
14836  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14837 
14838  VkResult res = (*pContext)->Defragment(
14839  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14840  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14841  info.commandBuffer, pStats);
14842 
14843  if(res != VK_NOT_READY)
14844  {
14845  vma_delete(this, *pContext);
14846  *pContext = VMA_NULL;
14847  }
14848 
14849  return res;
14850 }
14851 
14852 VkResult VmaAllocator_T::DefragmentationEnd(
14853  VmaDefragmentationContext context)
14854 {
14855  vma_delete(this, context);
14856  return VK_SUCCESS;
14857 }
14858 
14859 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14860 {
14861  if(hAllocation->CanBecomeLost())
14862  {
14863  /*
14864  Warning: This is a carefully designed algorithm.
14865  Do not modify unless you really know what you're doing :)
14866  */
14867  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14868  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14869  for(;;)
14870  {
14871  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14872  {
14873  pAllocationInfo->memoryType = UINT32_MAX;
14874  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14875  pAllocationInfo->offset = 0;
14876  pAllocationInfo->size = hAllocation->GetSize();
14877  pAllocationInfo->pMappedData = VMA_NULL;
14878  pAllocationInfo->pUserData = hAllocation->GetUserData();
14879  return;
14880  }
14881  else if(localLastUseFrameIndex == localCurrFrameIndex)
14882  {
14883  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14884  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14885  pAllocationInfo->offset = hAllocation->GetOffset();
14886  pAllocationInfo->size = hAllocation->GetSize();
14887  pAllocationInfo->pMappedData = VMA_NULL;
14888  pAllocationInfo->pUserData = hAllocation->GetUserData();
14889  return;
14890  }
14891  else // Last use time earlier than current time.
14892  {
14893  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14894  {
14895  localLastUseFrameIndex = localCurrFrameIndex;
14896  }
14897  }
14898  }
14899  }
14900  else
14901  {
14902 #if VMA_STATS_STRING_ENABLED
14903  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14904  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14905  for(;;)
14906  {
14907  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14908  if(localLastUseFrameIndex == localCurrFrameIndex)
14909  {
14910  break;
14911  }
14912  else // Last use time earlier than current time.
14913  {
14914  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14915  {
14916  localLastUseFrameIndex = localCurrFrameIndex;
14917  }
14918  }
14919  }
14920 #endif
14921 
14922  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14923  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14924  pAllocationInfo->offset = hAllocation->GetOffset();
14925  pAllocationInfo->size = hAllocation->GetSize();
14926  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14927  pAllocationInfo->pUserData = hAllocation->GetUserData();
14928  }
14929 }
14930 
14931 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14932 {
14933  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14934  if(hAllocation->CanBecomeLost())
14935  {
14936  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14937  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14938  for(;;)
14939  {
14940  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14941  {
14942  return false;
14943  }
14944  else if(localLastUseFrameIndex == localCurrFrameIndex)
14945  {
14946  return true;
14947  }
14948  else // Last use time earlier than current time.
14949  {
14950  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14951  {
14952  localLastUseFrameIndex = localCurrFrameIndex;
14953  }
14954  }
14955  }
14956  }
14957  else
14958  {
14959 #if VMA_STATS_STRING_ENABLED
14960  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14961  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14962  for(;;)
14963  {
14964  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14965  if(localLastUseFrameIndex == localCurrFrameIndex)
14966  {
14967  break;
14968  }
14969  else // Last use time earlier than current time.
14970  {
14971  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14972  {
14973  localLastUseFrameIndex = localCurrFrameIndex;
14974  }
14975  }
14976  }
14977 #endif
14978 
14979  return true;
14980  }
14981 }
14982 
14983 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14984 {
14985  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14986 
14987  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14988 
14989  if(newCreateInfo.maxBlockCount == 0)
14990  {
14991  newCreateInfo.maxBlockCount = SIZE_MAX;
14992  }
14993  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14994  {
14995  return VK_ERROR_INITIALIZATION_FAILED;
14996  }
14997 
14998  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14999 
15000  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15001 
15002  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15003  if(res != VK_SUCCESS)
15004  {
15005  vma_delete(this, *pPool);
15006  *pPool = VMA_NULL;
15007  return res;
15008  }
15009 
15010  // Add to m_Pools.
15011  {
15012  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15013  (*pPool)->SetId(m_NextPoolId++);
15014  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15015  }
15016 
15017  return VK_SUCCESS;
15018 }
15019 
15020 void VmaAllocator_T::DestroyPool(VmaPool pool)
15021 {
15022  // Remove from m_Pools.
15023  {
15024  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15025  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15026  VMA_ASSERT(success && "Pool not found in Allocator.");
15027  }
15028 
15029  vma_delete(this, pool);
15030 }
15031 
15032 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15033 {
15034  pool->m_BlockVector.GetPoolStats(pPoolStats);
15035 }
15036 
15037 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15038 {
15039  m_CurrentFrameIndex.store(frameIndex);
15040 }
15041 
15042 void VmaAllocator_T::MakePoolAllocationsLost(
15043  VmaPool hPool,
15044  size_t* pLostAllocationCount)
15045 {
15046  hPool->m_BlockVector.MakePoolAllocationsLost(
15047  m_CurrentFrameIndex.load(),
15048  pLostAllocationCount);
15049 }
15050 
15051 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15052 {
15053  return hPool->m_BlockVector.CheckCorruption();
15054 }
15055 
15056 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15057 {
15058  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15059 
15060  // Process default pools.
15061  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15062  {
15063  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15064  {
15065  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15066  VMA_ASSERT(pBlockVector);
15067  VkResult localRes = pBlockVector->CheckCorruption();
15068  switch(localRes)
15069  {
15070  case VK_ERROR_FEATURE_NOT_PRESENT:
15071  break;
15072  case VK_SUCCESS:
15073  finalRes = VK_SUCCESS;
15074  break;
15075  default:
15076  return localRes;
15077  }
15078  }
15079  }
15080 
15081  // Process custom pools.
15082  {
15083  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15084  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15085  {
15086  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15087  {
15088  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15089  switch(localRes)
15090  {
15091  case VK_ERROR_FEATURE_NOT_PRESENT:
15092  break;
15093  case VK_SUCCESS:
15094  finalRes = VK_SUCCESS;
15095  break;
15096  default:
15097  return localRes;
15098  }
15099  }
15100  }
15101  }
15102 
15103  return finalRes;
15104 }
15105 
15106 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15107 {
15108  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15109  (*pAllocation)->InitLost();
15110 }
15111 
15112 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15113 {
15114  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15115 
15116  VkResult res;
15117  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15118  {
15119  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15120  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15121  {
15122  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15123  if(res == VK_SUCCESS)
15124  {
15125  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15126  }
15127  }
15128  else
15129  {
15130  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15131  }
15132  }
15133  else
15134  {
15135  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15136  }
15137 
15138  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15139  {
15140  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15141  }
15142 
15143  return res;
15144 }
15145 
15146 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15147 {
15148  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15149  {
15150  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15151  }
15152 
15153  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15154 
15155  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15156  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15157  {
15158  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15159  m_HeapSizeLimit[heapIndex] += size;
15160  }
15161 }
15162 
15163 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15164 {
15165  if(hAllocation->CanBecomeLost())
15166  {
15167  return VK_ERROR_MEMORY_MAP_FAILED;
15168  }
15169 
15170  switch(hAllocation->GetType())
15171  {
15172  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15173  {
15174  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15175  char *pBytes = VMA_NULL;
15176  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15177  if(res == VK_SUCCESS)
15178  {
15179  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15180  hAllocation->BlockAllocMap();
15181  }
15182  return res;
15183  }
15184  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15185  return hAllocation->DedicatedAllocMap(this, ppData);
15186  default:
15187  VMA_ASSERT(0);
15188  return VK_ERROR_MEMORY_MAP_FAILED;
15189  }
15190 }
15191 
15192 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15193 {
15194  switch(hAllocation->GetType())
15195  {
15196  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15197  {
15198  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15199  hAllocation->BlockAllocUnmap();
15200  pBlock->Unmap(this, 1);
15201  }
15202  break;
15203  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15204  hAllocation->DedicatedAllocUnmap(this);
15205  break;
15206  default:
15207  VMA_ASSERT(0);
15208  }
15209 }
15210 
15211 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15212 {
15213  VkResult res = VK_SUCCESS;
15214  switch(hAllocation->GetType())
15215  {
15216  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15217  res = GetVulkanFunctions().vkBindBufferMemory(
15218  m_hDevice,
15219  hBuffer,
15220  hAllocation->GetMemory(),
15221  0); //memoryOffset
15222  break;
15223  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15224  {
15225  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15226  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15227  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15228  break;
15229  }
15230  default:
15231  VMA_ASSERT(0);
15232  }
15233  return res;
15234 }
15235 
15236 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15237 {
15238  VkResult res = VK_SUCCESS;
15239  switch(hAllocation->GetType())
15240  {
15241  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15242  res = GetVulkanFunctions().vkBindImageMemory(
15243  m_hDevice,
15244  hImage,
15245  hAllocation->GetMemory(),
15246  0); //memoryOffset
15247  break;
15248  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15249  {
15250  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15251  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15252  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15253  break;
15254  }
15255  default:
15256  VMA_ASSERT(0);
15257  }
15258  return res;
15259 }
15260 
15261 void VmaAllocator_T::FlushOrInvalidateAllocation(
15262  VmaAllocation hAllocation,
15263  VkDeviceSize offset, VkDeviceSize size,
15264  VMA_CACHE_OPERATION op)
15265 {
15266  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15267  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15268  {
15269  const VkDeviceSize allocationSize = hAllocation->GetSize();
15270  VMA_ASSERT(offset <= allocationSize);
15271 
15272  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15273 
15274  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15275  memRange.memory = hAllocation->GetMemory();
15276 
15277  switch(hAllocation->GetType())
15278  {
15279  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15280  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15281  if(size == VK_WHOLE_SIZE)
15282  {
15283  memRange.size = allocationSize - memRange.offset;
15284  }
15285  else
15286  {
15287  VMA_ASSERT(offset + size <= allocationSize);
15288  memRange.size = VMA_MIN(
15289  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15290  allocationSize - memRange.offset);
15291  }
15292  break;
15293 
15294  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15295  {
15296  // 1. Still within this allocation.
15297  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15298  if(size == VK_WHOLE_SIZE)
15299  {
15300  size = allocationSize - offset;
15301  }
15302  else
15303  {
15304  VMA_ASSERT(offset + size <= allocationSize);
15305  }
15306  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15307 
15308  // 2. Adjust to whole block.
15309  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15310  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15311  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15312  memRange.offset += allocationOffset;
15313  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15314 
15315  break;
15316  }
15317 
15318  default:
15319  VMA_ASSERT(0);
15320  }
15321 
15322  switch(op)
15323  {
15324  case VMA_CACHE_FLUSH:
15325  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15326  break;
15327  case VMA_CACHE_INVALIDATE:
15328  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15329  break;
15330  default:
15331  VMA_ASSERT(0);
15332  }
15333  }
15334  // else: Just ignore this call.
15335 }
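
 // Worked example (illustrative comment, not part of the original source):
 // with nonCoherentAtomSize = 64, offset = 100, size = 200 and an allocation
 // of at least 320 bytes, the dedicated path above computes
 // memRange.offset = VmaAlignDown(100, 64) = 64 and
 // memRange.size = VmaAlignUp(200 + (100 - 64), 64) = 256, so the flushed
 // range [64, 320) covers the requested [100, 300) rounded out to atom
 // boundaries.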
15336 
15337 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15338 {
15339  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15340 
15341  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15342  {
15343  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15344  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15345  VMA_ASSERT(pDedicatedAllocations);
15346  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15347  VMA_ASSERT(success);
15348  }
15349 
15350  VkDeviceMemory hMemory = allocation->GetMemory();
15351 
15352  /*
15353  There is no need to call this, because the Vulkan spec allows skipping
15354  vkUnmapMemory before vkFreeMemory.
15355 
15356  if(allocation->GetMappedData() != VMA_NULL)
15357  {
15358  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15359  }
15360  */
15361 
15362  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15363 
15364  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15365 }
15366 
15367 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15368 {
15369  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15370  !hAllocation->CanBecomeLost() &&
15371  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15372  {
15373  void* pData = VMA_NULL;
15374  VkResult res = Map(hAllocation, &pData);
15375  if(res == VK_SUCCESS)
15376  {
15377  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15378  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15379  Unmap(hAllocation);
15380  }
15381  else
15382  {
15383  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15384  }
15385  }
15386 }
15387 
15388 #if VMA_STATS_STRING_ENABLED
15389 
15390 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15391 {
15392  bool dedicatedAllocationsStarted = false;
15393  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15394  {
15395  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15396  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15397  VMA_ASSERT(pDedicatedAllocVector);
15398  if(pDedicatedAllocVector->empty() == false)
15399  {
15400  if(dedicatedAllocationsStarted == false)
15401  {
15402  dedicatedAllocationsStarted = true;
15403  json.WriteString("DedicatedAllocations");
15404  json.BeginObject();
15405  }
15406 
15407  json.BeginString("Type ");
15408  json.ContinueString(memTypeIndex);
15409  json.EndString();
15410 
15411  json.BeginArray();
15412 
15413  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15414  {
15415  json.BeginObject(true);
15416  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15417  hAlloc->PrintParameters(json);
15418  json.EndObject();
15419  }
15420 
15421  json.EndArray();
15422  }
15423  }
15424  if(dedicatedAllocationsStarted)
15425  {
15426  json.EndObject();
15427  }
15428 
15429  {
15430  bool allocationsStarted = false;
15431  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15432  {
15433  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15434  {
15435  if(allocationsStarted == false)
15436  {
15437  allocationsStarted = true;
15438  json.WriteString("DefaultPools");
15439  json.BeginObject();
15440  }
15441 
15442  json.BeginString("Type ");
15443  json.ContinueString(memTypeIndex);
15444  json.EndString();
15445 
15446  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15447  }
15448  }
15449  if(allocationsStarted)
15450  {
15451  json.EndObject();
15452  }
15453  }
15454 
15455  // Custom pools
15456  {
15457  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15458  const size_t poolCount = m_Pools.size();
15459  if(poolCount > 0)
15460  {
15461  json.WriteString("Pools");
15462  json.BeginObject();
15463  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15464  {
15465  json.BeginString();
15466  json.ContinueString(m_Pools[poolIndex]->GetId());
15467  json.EndString();
15468 
15469  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15470  }
15471  json.EndObject();
15472  }
15473  }
15474 }
15475 
15476 #endif // #if VMA_STATS_STRING_ENABLED
15477 
15478 ////////////////////////////////////////////////////////////////////////////////
15479 // Public interface
15480 
15481 VkResult vmaCreateAllocator(
15482  const VmaAllocatorCreateInfo* pCreateInfo,
15483  VmaAllocator* pAllocator)
15484 {
15485  VMA_ASSERT(pCreateInfo && pAllocator);
15486  VMA_DEBUG_LOG("vmaCreateAllocator");
15487  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15488  return (*pAllocator)->Init(pCreateInfo);
15489 }
15490 
15491 void vmaDestroyAllocator(
15492  VmaAllocator allocator)
15493 {
15494  if(allocator != VK_NULL_HANDLE)
15495  {
15496  VMA_DEBUG_LOG("vmaDestroyAllocator");
15497  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15498  vma_delete(&allocationCallbacks, allocator);
15499  }
15500 }
15501 
15502 void vmaGetPhysicalDeviceProperties(
15503  VmaAllocator allocator,
15504  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15505 {
15506  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15507  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15508 }
15509 
15510 void vmaGetMemoryProperties(
15511  VmaAllocator allocator,
15512  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15513 {
15514  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15515  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15516 }
15517 
15518 void vmaGetMemoryTypeProperties(
15519  VmaAllocator allocator,
15520  uint32_t memoryTypeIndex,
15521  VkMemoryPropertyFlags* pFlags)
15522 {
15523  VMA_ASSERT(allocator && pFlags);
15524  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15525  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15526 }
15527 
15528 void vmaSetCurrentFrameIndex(
15529  VmaAllocator allocator,
15530  uint32_t frameIndex)
15531 {
15532  VMA_ASSERT(allocator);
15533  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15534 
15535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15536 
15537  allocator->SetCurrentFrameIndex(frameIndex);
15538 }
15539 
15540 void vmaCalculateStats(
15541  VmaAllocator allocator,
15542  VmaStats* pStats)
15543 {
15544  VMA_ASSERT(allocator && pStats);
15545  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15546  allocator->CalculateStats(pStats);
15547 }
15548 
15549 #if VMA_STATS_STRING_ENABLED
15550 
15551 void vmaBuildStatsString(
15552  VmaAllocator allocator,
15553  char** ppStatsString,
15554  VkBool32 detailedMap)
15555 {
15556  VMA_ASSERT(allocator && ppStatsString);
15557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15558 
15559  VmaStringBuilder sb(allocator);
15560  {
15561  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15562  json.BeginObject();
15563 
15564  VmaStats stats;
15565  allocator->CalculateStats(&stats);
15566 
15567  json.WriteString("Total");
15568  VmaPrintStatInfo(json, stats.total);
15569 
15570  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15571  {
15572  json.BeginString("Heap ");
15573  json.ContinueString(heapIndex);
15574  json.EndString();
15575  json.BeginObject();
15576 
15577  json.WriteString("Size");
15578  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15579 
15580  json.WriteString("Flags");
15581  json.BeginArray(true);
15582  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15583  {
15584  json.WriteString("DEVICE_LOCAL");
15585  }
15586  json.EndArray();
15587 
15588  if(stats.memoryHeap[heapIndex].blockCount > 0)
15589  {
15590  json.WriteString("Stats");
15591  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15592  }
15593 
15594  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15595  {
15596  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15597  {
15598  json.BeginString("Type ");
15599  json.ContinueString(typeIndex);
15600  json.EndString();
15601 
15602  json.BeginObject();
15603 
15604  json.WriteString("Flags");
15605  json.BeginArray(true);
15606  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15607  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15608  {
15609  json.WriteString("DEVICE_LOCAL");
15610  }
15611  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15612  {
15613  json.WriteString("HOST_VISIBLE");
15614  }
15615  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15616  {
15617  json.WriteString("HOST_COHERENT");
15618  }
15619  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15620  {
15621  json.WriteString("HOST_CACHED");
15622  }
15623  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15624  {
15625  json.WriteString("LAZILY_ALLOCATED");
15626  }
15627  json.EndArray();
15628 
15629  if(stats.memoryType[typeIndex].blockCount > 0)
15630  {
15631  json.WriteString("Stats");
15632  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15633  }
15634 
15635  json.EndObject();
15636  }
15637  }
15638 
15639  json.EndObject();
15640  }
15641  if(detailedMap == VK_TRUE)
15642  {
15643  allocator->PrintDetailedMap(json);
15644  }
15645 
15646  json.EndObject();
15647  }
15648 
15649  const size_t len = sb.GetLength();
15650  char* const pChars = vma_new_array(allocator, char, len + 1);
15651  if(len > 0)
15652  {
15653  memcpy(pChars, sb.GetData(), len);
15654  }
15655  pChars[len] = '\0';
15656  *ppStatsString = pChars;
15657 }
15658 
15659 void vmaFreeStatsString(
15660  VmaAllocator allocator,
15661  char* pStatsString)
15662 {
15663  if(pStatsString != VMA_NULL)
15664  {
15665  VMA_ASSERT(allocator);
15666  size_t len = strlen(pStatsString);
15667  vma_delete_array(allocator, pStatsString, len + 1);
15668  }
15669 }
15670 
15671 #endif // #if VMA_STATS_STRING_ENABLED
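
/*
Usage sketch (illustrative, not part of the original source): dumping allocator
state as a JSON string. Assumes a valid `allocator` and that printf is
available.

char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/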
15672 
15673 /*
15674 This function is not protected by any mutex because it just reads immutable data.
15675 */
15676 VkResult vmaFindMemoryTypeIndex(
15677  VmaAllocator allocator,
15678  uint32_t memoryTypeBits,
15679  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15680  uint32_t* pMemoryTypeIndex)
15681 {
15682  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15683  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15684  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15685 
15686  if(pAllocationCreateInfo->memoryTypeBits != 0)
15687  {
15688  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15689  }
15690 
15691  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15692  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15693 
15694  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15695  if(mapped)
15696  {
15697  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15698  }
15699 
15700  // Convert usage to requiredFlags and preferredFlags.
15701  switch(pAllocationCreateInfo->usage)
15702  {
15703  case VMA_MEMORY_USAGE_UNKNOWN:
15704  break;
15705  case VMA_MEMORY_USAGE_GPU_ONLY:
15706  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15707  {
15708  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15709  }
15710  break;
15711  case VMA_MEMORY_USAGE_CPU_ONLY:
15712  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15713  break;
15714  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15715  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15716  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15717  {
15718  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15719  }
15720  break;
15721  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15722  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15723  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15724  break;
15725  default:
15726  break;
15727  }
15728 
15729  *pMemoryTypeIndex = UINT32_MAX;
15730  uint32_t minCost = UINT32_MAX;
15731  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15732  memTypeIndex < allocator->GetMemoryTypeCount();
15733  ++memTypeIndex, memTypeBit <<= 1)
15734  {
15735  // This memory type is acceptable according to memoryTypeBits bitmask.
15736  if((memTypeBit & memoryTypeBits) != 0)
15737  {
15738  const VkMemoryPropertyFlags currFlags =
15739  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15740  // This memory type contains requiredFlags.
15741  if((requiredFlags & ~currFlags) == 0)
15742  {
15743  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15744  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15745  // Remember memory type with lowest cost.
15746  if(currCost < minCost)
15747  {
15748  *pMemoryTypeIndex = memTypeIndex;
15749  if(currCost == 0)
15750  {
15751  return VK_SUCCESS;
15752  }
15753  minCost = currCost;
15754  }
15755  }
15756  }
15757  }
15758  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15759 }
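
/*
Usage sketch (illustrative, not part of the original source): finding a memory
type for a host-visible staging allocation. Assumes a valid `allocator`;
passing UINT32_MAX as memoryTypeBits accepts any type, while real code would
pass VkMemoryRequirements::memoryTypeBits.

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
// On VK_SUCCESS, memTypeIndex is the type satisfying all requiredFlags with
// the fewest preferredFlags bits missing.
*/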
15760 
15761 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15762  VmaAllocator allocator,
15763  const VkBufferCreateInfo* pBufferCreateInfo,
15764  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15765  uint32_t* pMemoryTypeIndex)
15766 {
15767  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15768  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15769  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15770  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15771 
15772  const VkDevice hDev = allocator->m_hDevice;
15773  VkBuffer hBuffer = VK_NULL_HANDLE;
15774  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15775  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15776  if(res == VK_SUCCESS)
15777  {
15778  VkMemoryRequirements memReq = {};
15779  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15780  hDev, hBuffer, &memReq);
15781 
15782  res = vmaFindMemoryTypeIndex(
15783  allocator,
15784  memReq.memoryTypeBits,
15785  pAllocationCreateInfo,
15786  pMemoryTypeIndex);
15787 
15788  allocator->GetVulkanFunctions().vkDestroyBuffer(
15789  hDev, hBuffer, allocator->GetAllocationCallbacks());
15790  }
15791  return res;
15792 }
15793 
15794 VkResult vmaFindMemoryTypeIndexForImageInfo(
15795  VmaAllocator allocator,
15796  const VkImageCreateInfo* pImageCreateInfo,
15797  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15798  uint32_t* pMemoryTypeIndex)
15799 {
15800  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15801  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15802  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15803  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15804 
15805  const VkDevice hDev = allocator->m_hDevice;
15806  VkImage hImage = VK_NULL_HANDLE;
15807  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15808  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15809  if(res == VK_SUCCESS)
15810  {
15811  VkMemoryRequirements memReq = {};
15812  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15813  hDev, hImage, &memReq);
15814 
15815  res = vmaFindMemoryTypeIndex(
15816  allocator,
15817  memReq.memoryTypeBits,
15818  pAllocationCreateInfo,
15819  pMemoryTypeIndex);
15820 
15821  allocator->GetVulkanFunctions().vkDestroyImage(
15822  hDev, hImage, allocator->GetAllocationCallbacks());
15823  }
15824  return res;
15825 }
15826 
15827 VkResult vmaCreatePool(
15828  VmaAllocator allocator,
15829  const VmaPoolCreateInfo* pCreateInfo,
15830  VmaPool* pPool)
15831 {
15832  VMA_ASSERT(allocator && pCreateInfo && pPool);
15833 
15834  VMA_DEBUG_LOG("vmaCreatePool");
15835 
15836  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15837 
15838  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15839 
15840 #if VMA_RECORDING_ENABLED
15841  if(allocator->GetRecorder() != VMA_NULL)
15842  {
15843  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15844  }
15845 #endif
15846 
15847  return res;
15848 }
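
/*
Usage sketch (illustrative, not part of the original source): creating a custom
pool for a previously found memory type index. Assumes valid `allocator` and
`memTypeIndex`.

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block
poolCreateInfo.maxBlockCount = 4;               // 0 would mean no limit

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
*/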
15849 
15850 void vmaDestroyPool(
15851  VmaAllocator allocator,
15852  VmaPool pool)
15853 {
15854  VMA_ASSERT(allocator);
15855 
15856  if(pool == VK_NULL_HANDLE)
15857  {
15858  return;
15859  }
15860 
15861  VMA_DEBUG_LOG("vmaDestroyPool");
15862 
15863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15864 
15865 #if VMA_RECORDING_ENABLED
15866  if(allocator->GetRecorder() != VMA_NULL)
15867  {
15868  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15869  }
15870 #endif
15871 
15872  allocator->DestroyPool(pool);
15873 }
15874 
15875 void vmaGetPoolStats(
15876  VmaAllocator allocator,
15877  VmaPool pool,
15878  VmaPoolStats* pPoolStats)
15879 {
15880  VMA_ASSERT(allocator && pool && pPoolStats);
15881 
15882  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15883 
15884  allocator->GetPoolStats(pool, pPoolStats);
15885 }
15886 
15887 void vmaMakePoolAllocationsLost(
15888  VmaAllocator allocator,
15889  VmaPool pool,
15890  size_t* pLostAllocationCount)
15891 {
15892  VMA_ASSERT(allocator && pool);
15893 
15894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15895 
15896 #if VMA_RECORDING_ENABLED
15897  if(allocator->GetRecorder() != VMA_NULL)
15898  {
15899  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15900  }
15901 #endif
15902 
15903  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15904 }
15905 
15906 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15907 {
15908  VMA_ASSERT(allocator && pool);
15909 
15910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15911 
15912  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15913 
15914  return allocator->CheckPoolCorruption(pool);
15915 }
15916 
15917 VkResult vmaAllocateMemory(
15918  VmaAllocator allocator,
15919  const VkMemoryRequirements* pVkMemoryRequirements,
15920  const VmaAllocationCreateInfo* pCreateInfo,
15921  VmaAllocation* pAllocation,
15922  VmaAllocationInfo* pAllocationInfo)
15923 {
15924  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15925 
15926  VMA_DEBUG_LOG("vmaAllocateMemory");
15927 
15928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15929 
15930  VkResult result = allocator->AllocateMemory(
15931  *pVkMemoryRequirements,
15932  false, // requiresDedicatedAllocation
15933  false, // prefersDedicatedAllocation
15934  VK_NULL_HANDLE, // dedicatedBuffer
15935  VK_NULL_HANDLE, // dedicatedImage
15936  *pCreateInfo,
15937  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15938  1, // allocationCount
15939  pAllocation);
15940 
15941 #if VMA_RECORDING_ENABLED
15942  if(allocator->GetRecorder() != VMA_NULL)
15943  {
15944  allocator->GetRecorder()->RecordAllocateMemory(
15945  allocator->GetCurrentFrameIndex(),
15946  *pVkMemoryRequirements,
15947  *pCreateInfo,
15948  *pAllocation);
15949  }
15950 #endif
15951 
15952  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15953  {
15954  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15955  }
15956 
15957  return result;
15958 }
15959 
15960 VkResult vmaAllocateMemoryPages(
15961  VmaAllocator allocator,
15962  const VkMemoryRequirements* pVkMemoryRequirements,
15963  const VmaAllocationCreateInfo* pCreateInfo,
15964  size_t allocationCount,
15965  VmaAllocation* pAllocations,
15966  VmaAllocationInfo* pAllocationInfo)
15967 {
15968  if(allocationCount == 0)
15969  {
15970  return VK_SUCCESS;
15971  }
15972 
15973  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15974 
15975  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15976 
15977  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15978 
15979  VkResult result = allocator->AllocateMemory(
15980  *pVkMemoryRequirements,
15981  false, // requiresDedicatedAllocation
15982  false, // prefersDedicatedAllocation
15983  VK_NULL_HANDLE, // dedicatedBuffer
15984  VK_NULL_HANDLE, // dedicatedImage
15985  *pCreateInfo,
15986  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15987  allocationCount,
15988  pAllocations);
15989 
15990 #if VMA_RECORDING_ENABLED
15991  if(allocator->GetRecorder() != VMA_NULL)
15992  {
15993  allocator->GetRecorder()->RecordAllocateMemoryPages(
15994  allocator->GetCurrentFrameIndex(),
15995  *pVkMemoryRequirements,
15996  *pCreateInfo,
15997  (uint64_t)allocationCount,
15998  pAllocations);
15999  }
16000 #endif
16001 
16002  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16003  {
16004  for(size_t i = 0; i < allocationCount; ++i)
16005  {
16006  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16007  }
16008  }
16009 
16010  return result;
16011 }
16012 
16013 VkResult vmaAllocateMemoryForBuffer(
16014  VmaAllocator allocator,
16015  VkBuffer buffer,
16016  const VmaAllocationCreateInfo* pCreateInfo,
16017  VmaAllocation* pAllocation,
16018  VmaAllocationInfo* pAllocationInfo)
16019 {
16020  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16021 
16022  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16023 
16024  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16025 
16026  VkMemoryRequirements vkMemReq = {};
16027  bool requiresDedicatedAllocation = false;
16028  bool prefersDedicatedAllocation = false;
16029  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16030  requiresDedicatedAllocation,
16031  prefersDedicatedAllocation);
16032 
16033  VkResult result = allocator->AllocateMemory(
16034  vkMemReq,
16035  requiresDedicatedAllocation,
16036  prefersDedicatedAllocation,
16037  buffer, // dedicatedBuffer
16038  VK_NULL_HANDLE, // dedicatedImage
16039  *pCreateInfo,
16040  VMA_SUBALLOCATION_TYPE_BUFFER,
16041  1, // allocationCount
16042  pAllocation);
16043 
16044 #if VMA_RECORDING_ENABLED
16045  if(allocator->GetRecorder() != VMA_NULL)
16046  {
16047  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16048  allocator->GetCurrentFrameIndex(),
16049  vkMemReq,
16050  requiresDedicatedAllocation,
16051  prefersDedicatedAllocation,
16052  *pCreateInfo,
16053  *pAllocation);
16054  }
16055 #endif
16056 
16057  if(pAllocationInfo && result == VK_SUCCESS)
16058  {
16059  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16060  }
16061 
16062  return result;
16063 }
16064 
16065 VkResult vmaAllocateMemoryForImage(
16066  VmaAllocator allocator,
16067  VkImage image,
16068  const VmaAllocationCreateInfo* pCreateInfo,
16069  VmaAllocation* pAllocation,
16070  VmaAllocationInfo* pAllocationInfo)
16071 {
16072  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16073 
16074  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16075 
16076  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16077 
16078  VkMemoryRequirements vkMemReq = {};
16079  bool requiresDedicatedAllocation = false;
16080  bool prefersDedicatedAllocation = false;
16081  allocator->GetImageMemoryRequirements(image, vkMemReq,
16082  requiresDedicatedAllocation, prefersDedicatedAllocation);
16083 
16084  VkResult result = allocator->AllocateMemory(
16085  vkMemReq,
16086  requiresDedicatedAllocation,
16087  prefersDedicatedAllocation,
16088  VK_NULL_HANDLE, // dedicatedBuffer
16089  image, // dedicatedImage
16090  *pCreateInfo,
16091  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16092  1, // allocationCount
16093  pAllocation);
16094 
16095 #if VMA_RECORDING_ENABLED
16096  if(allocator->GetRecorder() != VMA_NULL)
16097  {
16098  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16099  allocator->GetCurrentFrameIndex(),
16100  vkMemReq,
16101  requiresDedicatedAllocation,
16102  prefersDedicatedAllocation,
16103  *pCreateInfo,
16104  *pAllocation);
16105  }
16106 #endif
16107 
16108  if(pAllocationInfo && result == VK_SUCCESS)
16109  {
16110  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16111  }
16112 
16113  return result;
16114 }
16115 
16116 void vmaFreeMemory(
16117  VmaAllocator allocator,
16118  VmaAllocation allocation)
16119 {
16120  VMA_ASSERT(allocator);
16121 
16122  if(allocation == VK_NULL_HANDLE)
16123  {
16124  return;
16125  }
16126 
16127  VMA_DEBUG_LOG("vmaFreeMemory");
16128 
16129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16130 
16131 #if VMA_RECORDING_ENABLED
16132  if(allocator->GetRecorder() != VMA_NULL)
16133  {
16134  allocator->GetRecorder()->RecordFreeMemory(
16135  allocator->GetCurrentFrameIndex(),
16136  allocation);
16137  }
16138 #endif
16139 
16140  allocator->FreeMemory(
16141  1, // allocationCount
16142  &allocation);
16143 }
16144 
16145 void vmaFreeMemoryPages(
16146  VmaAllocator allocator,
16147  size_t allocationCount,
16148  VmaAllocation* pAllocations)
16149 {
16150  if(allocationCount == 0)
16151  {
16152  return;
16153  }
16154 
16155  VMA_ASSERT(allocator);
16156 
16157  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16158 
16159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16160 
16161 #if VMA_RECORDING_ENABLED
16162  if(allocator->GetRecorder() != VMA_NULL)
16163  {
16164  allocator->GetRecorder()->RecordFreeMemoryPages(
16165  allocator->GetCurrentFrameIndex(),
16166  (uint64_t)allocationCount,
16167  pAllocations);
16168  }
16169 #endif
16170 
16171  allocator->FreeMemory(allocationCount, pAllocations);
16172 }
16173 
16174 VkResult vmaResizeAllocation(
16175  VmaAllocator allocator,
16176  VmaAllocation allocation,
16177  VkDeviceSize newSize)
16178 {
16179  VMA_ASSERT(allocator && allocation);
16180 
16181  VMA_DEBUG_LOG("vmaResizeAllocation");
16182 
16183  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16184 
16185 #if VMA_RECORDING_ENABLED
16186  if(allocator->GetRecorder() != VMA_NULL)
16187  {
16188  allocator->GetRecorder()->RecordResizeAllocation(
16189  allocator->GetCurrentFrameIndex(),
16190  allocation,
16191  newSize);
16192  }
16193 #endif
16194 
16195  return allocator->ResizeAllocation(allocation, newSize);
16196 }
16197 
16198 void vmaGetAllocationInfo(
16199  VmaAllocator allocator,
16200  VmaAllocation allocation,
16201  VmaAllocationInfo* pAllocationInfo)
16202 {
16203  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16204 
16205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16206 
16207 #if VMA_RECORDING_ENABLED
16208  if(allocator->GetRecorder() != VMA_NULL)
16209  {
16210  allocator->GetRecorder()->RecordGetAllocationInfo(
16211  allocator->GetCurrentFrameIndex(),
16212  allocation);
16213  }
16214 #endif
16215 
16216  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16217 }
16218 
16219 VkBool32 vmaTouchAllocation(
16220  VmaAllocator allocator,
16221  VmaAllocation allocation)
16222 {
16223  VMA_ASSERT(allocator && allocation);
16224 
16225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16226 
16227 #if VMA_RECORDING_ENABLED
16228  if(allocator->GetRecorder() != VMA_NULL)
16229  {
16230  allocator->GetRecorder()->RecordTouchAllocation(
16231  allocator->GetCurrentFrameIndex(),
16232  allocation);
16233  }
16234 #endif
16235 
16236  return allocator->TouchAllocation(allocation);
16237 }
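
/*
Usage sketch (illustrative, not part of the original source): for an allocation
created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, marking it as used in
the current frame before recording work that touches it.

if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // The allocation became lost: destroy the stale buffer/image and recreate
    // both the resource and the allocation.
}
*/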
16238 
16239 void vmaSetAllocationUserData(
16240  VmaAllocator allocator,
16241  VmaAllocation allocation,
16242  void* pUserData)
16243 {
16244  VMA_ASSERT(allocator && allocation);
16245 
16246  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16247 
16248  allocation->SetUserData(allocator, pUserData);
16249 
16250 #if VMA_RECORDING_ENABLED
16251  if(allocator->GetRecorder() != VMA_NULL)
16252  {
16253  allocator->GetRecorder()->RecordSetAllocationUserData(
16254  allocator->GetCurrentFrameIndex(),
16255  allocation,
16256  pUserData);
16257  }
16258 #endif
16259 }
16260 
16261 void vmaCreateLostAllocation(
16262  VmaAllocator allocator,
16263  VmaAllocation* pAllocation)
16264 {
16265  VMA_ASSERT(allocator && pAllocation);
16266 
16267  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16268 
16269  allocator->CreateLostAllocation(pAllocation);
16270 
16271 #if VMA_RECORDING_ENABLED
16272  if(allocator->GetRecorder() != VMA_NULL)
16273  {
16274  allocator->GetRecorder()->RecordCreateLostAllocation(
16275  allocator->GetCurrentFrameIndex(),
16276  *pAllocation);
16277  }
16278 #endif
16279 }
16280 
16281 VkResult vmaMapMemory(
16282  VmaAllocator allocator,
16283  VmaAllocation allocation,
16284  void** ppData)
16285 {
16286  VMA_ASSERT(allocator && allocation && ppData);
16287 
16288  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16289 
16290  VkResult res = allocator->Map(allocation, ppData);
16291 
16292 #if VMA_RECORDING_ENABLED
16293  if(allocator->GetRecorder() != VMA_NULL)
16294  {
16295  allocator->GetRecorder()->RecordMapMemory(
16296  allocator->GetCurrentFrameIndex(),
16297  allocation);
16298  }
16299 #endif
16300 
16301  return res;
16302 }
16303 
16304 void vmaUnmapMemory(
16305  VmaAllocator allocator,
16306  VmaAllocation allocation)
16307 {
16308  VMA_ASSERT(allocator && allocation);
16309 
16310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16311 
16312 #if VMA_RECORDING_ENABLED
16313  if(allocator->GetRecorder() != VMA_NULL)
16314  {
16315  allocator->GetRecorder()->RecordUnmapMemory(
16316  allocator->GetCurrentFrameIndex(),
16317  allocation);
16318  }
16319 #endif
16320 
16321  allocator->Unmap(allocation);
16322 }
16323 
16324 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16325 {
16326  VMA_ASSERT(allocator && allocation);
16327 
16328  VMA_DEBUG_LOG("vmaFlushAllocation");
16329 
16330  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16331 
16332  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16333 
16334 #if VMA_RECORDING_ENABLED
16335  if(allocator->GetRecorder() != VMA_NULL)
16336  {
16337  allocator->GetRecorder()->RecordFlushAllocation(
16338  allocator->GetCurrentFrameIndex(),
16339  allocation, offset, size);
16340  }
16341 #endif
16342 }
16343 
16344 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16345 {
16346  VMA_ASSERT(allocator && allocation);
16347 
16348  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16349 
16350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16351 
16352  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16353 
16354 #if VMA_RECORDING_ENABLED
16355  if(allocator->GetRecorder() != VMA_NULL)
16356  {
16357  allocator->GetRecorder()->RecordInvalidateAllocation(
16358  allocator->GetCurrentFrameIndex(),
16359  allocation, offset, size);
16360  }
16361 #endif
16362 }
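
/*
Usage sketch (illustrative, not part of the original source): writing to a
host-visible allocation that may be non-coherent. Assumes valid `allocator` and
`allocation`, plus `data`/`dataSize` to upload.

void* mapped = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, data, (size_t)dataSize);
    // A no-op for HOST_COHERENT memory types, required for all others:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}
*/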
16363 
16364 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16365 {
16366  VMA_ASSERT(allocator);
16367 
16368  VMA_DEBUG_LOG("vmaCheckCorruption");
16369 
16370  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16371 
16372  return allocator->CheckCorruption(memoryTypeBits);
16373 }
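
/*
Usage sketch (illustrative, not part of the original source): checking margins
of all memory types. This is only meaningful when the implementation is
compiled with corruption detection enabled (nonzero VMA_DEBUG_MARGIN and
VMA_DEBUG_DETECT_CORRUPTION); otherwise the call returns
VK_ERROR_FEATURE_NOT_PRESENT.

VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
*/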
16374 
16375 VkResult vmaDefragment(
16376  VmaAllocator allocator,
16377  VmaAllocation* pAllocations,
16378  size_t allocationCount,
16379  VkBool32* pAllocationsChanged,
16380  const VmaDefragmentationInfo *pDefragmentationInfo,
16381  VmaDefragmentationStats* pDefragmentationStats)
16382 {
16383  // Deprecated interface, reimplemented on top of the new one.
16384 
16385  VmaDefragmentationInfo2 info2 = {};
16386  info2.allocationCount = (uint32_t)allocationCount;
16387  info2.pAllocations = pAllocations;
16388  info2.pAllocationsChanged = pAllocationsChanged;
16389  if(pDefragmentationInfo != VMA_NULL)
16390  {
16391  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16392  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16393  }
16394  else
16395  {
16396  info2.maxCpuAllocationsToMove = UINT32_MAX;
16397  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16398  }
16399  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16400 
16401  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16402  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16403  if(res == VK_NOT_READY)
16404  {
16405  res = vmaDefragmentationEnd(allocator, ctx);
16406  }
16407  return res;
16408 }
16409 
16410 VkResult vmaDefragmentationBegin(
16411  VmaAllocator allocator,
16412  const VmaDefragmentationInfo2* pInfo,
16413  VmaDefragmentationStats* pStats,
16414  VmaDefragmentationContext *pContext)
16415 {
16416  VMA_ASSERT(allocator && pInfo && pContext);
16417 
16418  // Degenerate case: Nothing to defragment.
16419  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16420  {
16421  return VK_SUCCESS;
16422  }
16423 
16424  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16425  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16426  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16427  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16428 
16429  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16430 
16431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16432 
16433  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16434 
16435 #if VMA_RECORDING_ENABLED
16436  if(allocator->GetRecorder() != VMA_NULL)
16437  {
16438  allocator->GetRecorder()->RecordDefragmentationBegin(
16439  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16440  }
16441 #endif
16442 
16443  return res;
16444 }
16445 
16446 VkResult vmaDefragmentationEnd(
16447  VmaAllocator allocator,
16448  VmaDefragmentationContext context)
16449 {
16450  VMA_ASSERT(allocator);
16451 
16452  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16453 
16454  if(context != VK_NULL_HANDLE)
16455  {
16456  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16457 
16458 #if VMA_RECORDING_ENABLED
16459  if(allocator->GetRecorder() != VMA_NULL)
16460  {
16461  allocator->GetRecorder()->RecordDefragmentationEnd(
16462  allocator->GetCurrentFrameIndex(), context);
16463  }
16464 #endif
16465 
16466  return allocator->DefragmentationEnd(context);
16467  }
16468  else
16469  {
16470  return VK_SUCCESS;
16471  }
16472 }
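
/*
Usage sketch (illustrative, not part of the original source): CPU-side
defragmentation of an explicit set of allocations. Assumes valid `allocator`,
`allocs` and `allocCount`; buffers or images bound to moved allocations must be
recreated and rebound by the caller afterwards.

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &defragStats, &defragCtx);
// With no commandBuffer given, all moves happen on the CPU inside Begin;
// vmaDefragmentationEnd() is safe to call even when the context stayed null.
vmaDefragmentationEnd(allocator, defragCtx);
*/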
16473 
16474 VkResult vmaBindBufferMemory(
16475  VmaAllocator allocator,
16476  VmaAllocation allocation,
16477  VkBuffer buffer)
16478 {
16479  VMA_ASSERT(allocator && allocation && buffer);
16480 
16481  VMA_DEBUG_LOG("vmaBindBufferMemory");
16482 
16483  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16484 
16485  return allocator->BindBufferMemory(allocation, buffer);
16486 }
16487 
16488 VkResult vmaBindImageMemory(
16489  VmaAllocator allocator,
16490  VmaAllocation allocation,
16491  VkImage image)
16492 {
16493  VMA_ASSERT(allocator && allocation && image);
16494 
16495  VMA_DEBUG_LOG("vmaBindImageMemory");
16496 
16497  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16498 
16499  return allocator->BindImageMemory(allocation, image);
16500 }
16501 
16502 VkResult vmaCreateBuffer(
16503  VmaAllocator allocator,
16504  const VkBufferCreateInfo* pBufferCreateInfo,
16505  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16506  VkBuffer* pBuffer,
16507  VmaAllocation* pAllocation,
16508  VmaAllocationInfo* pAllocationInfo)
16509 {
16510  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16511 
16512  if(pBufferCreateInfo->size == 0)
16513  {
16514  return VK_ERROR_VALIDATION_FAILED_EXT;
16515  }
16516 
16517  VMA_DEBUG_LOG("vmaCreateBuffer");
16518 
16519  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16520 
16521  *pBuffer = VK_NULL_HANDLE;
16522  *pAllocation = VK_NULL_HANDLE;
16523 
16524  // 1. Create VkBuffer.
16525  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16526  allocator->m_hDevice,
16527  pBufferCreateInfo,
16528  allocator->GetAllocationCallbacks(),
16529  pBuffer);
16530  if(res >= 0)
16531  {
16532  // 2. vkGetBufferMemoryRequirements.
16533  VkMemoryRequirements vkMemReq = {};
16534  bool requiresDedicatedAllocation = false;
16535  bool prefersDedicatedAllocation = false;
16536  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16537  requiresDedicatedAllocation, prefersDedicatedAllocation);
16538 
16539  // Make sure alignment requirements for specific buffer usages reported
16540  // in Physical Device Properties are included in alignment reported by memory requirements.
16541  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16542  {
16543  VMA_ASSERT(vkMemReq.alignment %
16544  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16545  }
16546  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16547  {
16548  VMA_ASSERT(vkMemReq.alignment %
16549  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16550  }
16551  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16552  {
16553  VMA_ASSERT(vkMemReq.alignment %
16554  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16555  }
16556 
16557  // 3. Allocate memory using allocator.
16558  res = allocator->AllocateMemory(
16559  vkMemReq,
16560  requiresDedicatedAllocation,
16561  prefersDedicatedAllocation,
16562  *pBuffer, // dedicatedBuffer
16563  VK_NULL_HANDLE, // dedicatedImage
16564  *pAllocationCreateInfo,
16565  VMA_SUBALLOCATION_TYPE_BUFFER,
16566  1, // allocationCount
16567  pAllocation);
16568 
16569 #if VMA_RECORDING_ENABLED
16570  if(allocator->GetRecorder() != VMA_NULL)
16571  {
16572  allocator->GetRecorder()->RecordCreateBuffer(
16573  allocator->GetCurrentFrameIndex(),
16574  *pBufferCreateInfo,
16575  *pAllocationCreateInfo,
16576  *pAllocation);
16577  }
16578 #endif
16579 
16580  if(res >= 0)
16581  {
16582  // 4. Bind buffer with memory.
16583  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16584  if(res >= 0)
16585  {
16586  // All steps succeeded.
16587  #if VMA_STATS_STRING_ENABLED
16588  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16589  #endif
16590  if(pAllocationInfo != VMA_NULL)
16591  {
16592  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16593  }
16594 
16595  return VK_SUCCESS;
16596  }
16597  allocator->FreeMemory(
16598  1, // allocationCount
16599  pAllocation);
16600  *pAllocation = VK_NULL_HANDLE;
16601  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16602  *pBuffer = VK_NULL_HANDLE;
16603  return res;
16604  }
16605  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16606  *pBuffer = VK_NULL_HANDLE;
16607  return res;
16608  }
16609  return res;
16610 }
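
/*
Usage sketch (illustrative, not part of the original source): creating a buffer
together with its memory in one call. Assumes a valid `allocator`.

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
// ... use buf ...
vmaDestroyBuffer(allocator, buf, alloc);
*/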
16611 
16612 void vmaDestroyBuffer(
16613  VmaAllocator allocator,
16614  VkBuffer buffer,
16615  VmaAllocation allocation)
16616 {
16617  VMA_ASSERT(allocator);
16618 
16619  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16620  {
16621  return;
16622  }
16623 
16624  VMA_DEBUG_LOG("vmaDestroyBuffer");
16625 
16626  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16627 
16628 #if VMA_RECORDING_ENABLED
16629  if(allocator->GetRecorder() != VMA_NULL)
16630  {
16631  allocator->GetRecorder()->RecordDestroyBuffer(
16632  allocator->GetCurrentFrameIndex(),
16633  allocation);
16634  }
16635 #endif
16636 
16637  if(buffer != VK_NULL_HANDLE)
16638  {
16639  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16640  }
16641 
16642  if(allocation != VK_NULL_HANDLE)
16643  {
16644  allocator->FreeMemory(
16645  1, // allocationCount
16646  &allocation);
16647  }
16648 }
16649 
16650 VkResult vmaCreateImage(
16651  VmaAllocator allocator,
16652  const VkImageCreateInfo* pImageCreateInfo,
16653  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16654  VkImage* pImage,
16655  VmaAllocation* pAllocation,
16656  VmaAllocationInfo* pAllocationInfo)
16657 {
16658  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16659 
16660  if(pImageCreateInfo->extent.width == 0 ||
16661  pImageCreateInfo->extent.height == 0 ||
16662  pImageCreateInfo->extent.depth == 0 ||
16663  pImageCreateInfo->mipLevels == 0 ||
16664  pImageCreateInfo->arrayLayers == 0)
16665  {
16666  return VK_ERROR_VALIDATION_FAILED_EXT;
16667  }
16668 
16669  VMA_DEBUG_LOG("vmaCreateImage");
16670 
16671  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16672 
16673  *pImage = VK_NULL_HANDLE;
16674  *pAllocation = VK_NULL_HANDLE;
16675 
16676  // 1. Create VkImage.
16677  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16678  allocator->m_hDevice,
16679  pImageCreateInfo,
16680  allocator->GetAllocationCallbacks(),
16681  pImage);
16682  if(res >= 0)
16683  {
16684  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16685  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16686  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16687 
16688  // 2. Allocate memory using allocator.
16689  VkMemoryRequirements vkMemReq = {};
16690  bool requiresDedicatedAllocation = false;
16691  bool prefersDedicatedAllocation = false;
16692  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16693  requiresDedicatedAllocation, prefersDedicatedAllocation);
16694 
16695  res = allocator->AllocateMemory(
16696  vkMemReq,
16697  requiresDedicatedAllocation,
16698  prefersDedicatedAllocation,
16699  VK_NULL_HANDLE, // dedicatedBuffer
16700  *pImage, // dedicatedImage
16701  *pAllocationCreateInfo,
16702  suballocType,
16703  1, // allocationCount
16704  pAllocation);
16705 
16706 #if VMA_RECORDING_ENABLED
16707  if(allocator->GetRecorder() != VMA_NULL)
16708  {
16709  allocator->GetRecorder()->RecordCreateImage(
16710  allocator->GetCurrentFrameIndex(),
16711  *pImageCreateInfo,
16712  *pAllocationCreateInfo,
16713  *pAllocation);
16714  }
16715 #endif
16716 
16717  if(res >= 0)
16718  {
16719  // 3. Bind image with memory.
16720  res = allocator->BindImageMemory(*pAllocation, *pImage);
16721  if(res >= 0)
16722  {
16723  // All steps succeeded.
16724  #if VMA_STATS_STRING_ENABLED
16725  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16726  #endif
16727  if(pAllocationInfo != VMA_NULL)
16728  {
16729  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16730  }
16731 
16732  return VK_SUCCESS;
16733  }
16734  allocator->FreeMemory(
16735  1, // allocationCount
16736  pAllocation);
16737  *pAllocation = VK_NULL_HANDLE;
16738  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16739  *pImage = VK_NULL_HANDLE;
16740  return res;
16741  }
16742  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16743  *pImage = VK_NULL_HANDLE;
16744  return res;
16745  }
16746  return res;
16747 }
16748 
16749 void vmaDestroyImage(
16750  VmaAllocator allocator,
16751  VkImage image,
16752  VmaAllocation allocation)
16753 {
16754  VMA_ASSERT(allocator);
16755 
16756  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16757  {
16758  return;
16759  }
16760 
16761  VMA_DEBUG_LOG("vmaDestroyImage");
16762 
16763  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16764 
16765 #if VMA_RECORDING_ENABLED
16766  if(allocator->GetRecorder() != VMA_NULL)
16767  {
16768  allocator->GetRecorder()->RecordDestroyImage(
16769  allocator->GetCurrentFrameIndex(),
16770  allocation);
16771  }
16772 #endif
16773 
16774  if(image != VK_NULL_HANDLE)
16775  {
16776  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16777  }
16778  if(allocation != VK_NULL_HANDLE)
16779  {
16780  allocator->FreeMemory(
16781  1, // allocationCount
16782  &allocation);
16783  }
16784 }
16785 
16786 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:2870
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1754
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1679
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1805
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2298
Definition: vk_mem_alloc.h:2292
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1760
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1872
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2477
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1749
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1774
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2176
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2318
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2354
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1735
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2301
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2844
VmaMemoryUsage
Definition: vk_mem_alloc.h:1982
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2804
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2865
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2883
Definition: vk_mem_alloc.h:2021
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2163
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1752
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1942
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1685
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2783
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2781
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2810
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1706
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1778
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1711
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2885
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2150
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2364
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1745
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1925
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2313
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1698
Definition: vk_mem_alloc.h:2288
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2086
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1938
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1702
Definition: vk_mem_alloc.h:2113
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2304
Definition: vk_mem_alloc.h:2030
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1751
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2145
Definition: vk_mem_alloc.h:2136
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1928
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1747
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2326
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1814
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2357
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2134
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2834
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2169
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1853
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1944
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:2066
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1937
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1758
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1784
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use...
Definition: vk_mem_alloc.h:2780
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2858
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1700
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1757
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2340
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1750
Definition: vk_mem_alloc.h:2097
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1792
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2491
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1808
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1937
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1934
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2345
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2789
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:2106
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2472
Definition: vk_mem_alloc.h:2120
Definition: vk_mem_alloc.h:2132
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2881
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1743
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1932
Definition: vk_mem_alloc.h:1987
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2294
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1781
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1930
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1755
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1759
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2053
Definition: vk_mem_alloc.h:2127
Definition: vk_mem_alloc.h:2014
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2486
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1733
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1746
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2273
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2453
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2117
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2238
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1938
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1768
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1945
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2351
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1938
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side...
Definition: vk_mem_alloc.h:2849
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2458
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:2813