//
// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

/// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    /// Enables flush after recording every function call, so the file will be
    /// usable even if the application crashes.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    /// Flags for recording. Use #VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* pFilePath;
} VmaRecordSettings;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for the created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. Must remain valid for the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. Must remain valid for the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block allocated from large heaps. Optional, 0 means default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory/vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is left defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

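/*
Example (a usage sketch, not part of the original header): creating and
destroying an allocator; `physicalDevice` and `device` are assumed to be valid
handles created elsewhere.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images, allocations ...
    vmaDestroyAllocator(allocator);
*/
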
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns the property flags of this memory type.
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets the index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

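/*
Example (a usage sketch, not part of the original header): dumping detailed
statistics as a JSON string, e.g. to save it to a file for offline inspection;
`allocator` is assumed to be a valid VmaAllocator.

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write statsString to a file or log ...
    vmaFreeStatsString(allocator, statsString);
*/
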
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use #VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional, can be null.
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

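/*
Example (a usage sketch, not part of the original header): finding the memory
type index for a host-visible uniform buffer without creating the buffer yet;
`allocator` is assumed to be a valid VmaAllocator.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufferInfo, &allocCreateInfo, &memTypeIndex);
*/
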
/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

/// Describes parameter of created #VmaPool.
typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameter of existing #VmaPool.
typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

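/*
Example (a usage sketch, not part of the original header): creating a custom
pool for a particular memory type; `memTypeIndex` could come from
vmaFindMemoryTypeIndex() or one of its variants above.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB blocks
    poolInfo.maxBlockCount = 2;                // at most 256 MiB total

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // Allocations are then directed to the pool via VmaAllocationCreateInfo::pool.
*/
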
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in the given pool as lost if they are not used in the current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

/// Checks magic numbers in margins around all allocations in the given pool in search of corruptions.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a #VmaAllocation object, retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if not mapped.
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

/// General purpose memory allocation.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// General purpose memory allocation for multiple allocation objects at once.
VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using one of the vmaAllocateMemory* functions.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Frees memory and destroys multiple allocations.
void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

/// Tries to resize an allocation in place, if there is enough free memory after it.
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

/// Returns current information about the specified allocation and atomically marks it as used in the current frame.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/// Creates a new allocation that is in lost state from the beginning.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

/// Maps memory represented by the given allocation and returns a pointer to it.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

/// Unmaps memory represented by the given allocation, mapped previously using vmaMapMemory().
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

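/*
Example (a usage sketch, not part of the original header): writing data through
a temporary mapping; `allocation` is assumed to be placed in HOST_VISIBLE
memory, and `myData` is some host object to copy from.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, &myData, sizeof(myData));
        vmaUnmapMemory(allocator, allocation);
    }
*/
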
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

/// Parameters for defragmentation. To be used with vmaDefragmentationBegin().
typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    VmaAllocation* pAllocations;
    VkBool32* pAllocationsChanged;
    uint32_t poolCount;
    VmaPool* pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

/// Deprecated. Parameters for the legacy vmaDefragment() function.
typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by a defragmentation.
typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext);

VkResult vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

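/*
Example (a usage sketch, not part of the original header): CPU-side
defragmentation of a set of allocations; `allocations` is assumed to be an
array of `allocCount` VmaAllocation handles and `changed` an array of VkBool32.
Buffers or images bound to allocations that were moved must afterwards be
destroyed, recreated, and bound again using vmaBindBufferMemory()/
vmaBindImageMemory().

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.pAllocationsChanged = changed;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/
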
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

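/*
Example (a usage sketch, not part of the original header): creating a
device-local vertex buffer together with its memory in one call.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
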
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used by the VMA_STATS_STRING_ENABLED helpers below
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

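/*
Example (a sketch, not part of the original header): with
VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the pointers are provided explicitly,
e.g. ones obtained from a dynamic loader:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ... fill the remaining members the same way ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/
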
// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

3157 
3158 #ifndef VMA_USE_STL_SHARED_MUTEX
3159  // Compiler conforms to C++17.
3160  #if __cplusplus >= 201703L
3161  #define VMA_USE_STL_SHARED_MUTEX 1
3162  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3163  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3164  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3165  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3166  #define VMA_USE_STL_SHARED_MUTEX 1
3167  #else
3168  #define VMA_USE_STL_SHARED_MUTEX 0
3169  #endif
3170 #endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert>   // for assert
#include <algorithm> // for min, max
#include <mutex>
#include <atomic>    // for std::atomic

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be an unsigned integer, or a signed integer with a nonnegative value.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}
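
// Examples (not part of the original code):
// VmaNextPow2(17u) == 32, VmaNextPow2(32u) == 32,
// VmaPrevPow2(17u) == 16, VmaPrevPow2(32u) == 32.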

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
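
// Example (not part of the original code), with pageSize = 4096: a resource at
// offset 0 with size 1000 ends on page 0, so a resource starting at offset 1024
// shares that page (returns true), while one starting at offset 4096 begins on
// page 1 (returns false).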

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
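
/*
Example (a usage sketch, not part of the original code):

    struct IntLess { bool operator()(int a, int b) const { return a < b; } };
    const int arr[] = { 1, 3, 3, 7 };
    const int* it = VmaBinaryFindFirstNotLess(arr, arr + 4, 3, IntLess());
    // it == arr + 1, the first element equal to 3. Searching for key 4 would
    // return arr + 3, the position where 4 would have to be inserted.
*/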

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            // Note: allocation must go through the callbacks, consistently with resize() below.
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}
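
/*
Example (a usage sketch, not part of the original code): keeping a vector of
integers sorted with the helpers above. Passing VMA_NULL callbacks makes the
allocations fall back to VMA_SYSTEM_ALIGNED_MALLOC.

    struct IntLess { bool operator()(int a, int b) const { return a < b; } };
    VmaVector< int, VmaStlAllocator<int> > vec(VmaStlAllocator<int>(VMA_NULL));
    VmaVectorInsertSorted<IntLess>(vec, 3); // vec == { 3 }
    VmaVectorInsertSorted<IntLess>(vec, 1); // vec == { 1, 3 }
    VmaVectorRemoveSorted<IntLess>(vec, 3); // vec == { 1 }
*/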

// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

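/*
Example (a usage sketch, not part of the original code): allocating POD items
from the pool; Free() must be given a pointer obtained from the same allocator.

    VmaPoolAllocator<uint64_t> pool(VMA_NULL, 32); // first block holds 32 items
    uint64_t* a = pool.Alloc();
    uint64_t* b = pool.Alloc();
    pool.Free(a);
    pool.Free(b); // remaining blocks are released in ~VmaPoolAllocator()
*/
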
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

4626 template<typename T, typename AllocatorT>
4627 class VmaList
4628 {
4629  VMA_CLASS_NO_COPY(VmaList)
4630 public:
4631  class iterator
4632  {
4633  public:
4634  iterator() :
4635  m_pList(VMA_NULL),
4636  m_pItem(VMA_NULL)
4637  {
4638  }
4639 
4640  T& operator*() const
4641  {
4642  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4643  return m_pItem->Value;
4644  }
4645  T* operator->() const
4646  {
4647  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4648  return &m_pItem->Value;
4649  }
4650 
4651  iterator& operator++()
4652  {
4653  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4654  m_pItem = m_pItem->pNext;
4655  return *this;
4656  }
4657  iterator& operator--()
4658  {
4659  if(m_pItem != VMA_NULL)
4660  {
4661  m_pItem = m_pItem->pPrev;
4662  }
4663  else
4664  {
4665  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4666  m_pItem = m_pList->Back();
4667  }
4668  return *this;
4669  }
4670 
4671  iterator operator++(int)
4672  {
4673  iterator result = *this;
4674  ++*this;
4675  return result;
4676  }
4677  iterator operator--(int)
4678  {
4679  iterator result = *this;
4680  --*this;
4681  return result;
4682  }
4683 
4684  bool operator==(const iterator& rhs) const
4685  {
4686  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4687  return m_pItem == rhs.m_pItem;
4688  }
4689  bool operator!=(const iterator& rhs) const
4690  {
4691  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4692  return m_pItem != rhs.m_pItem;
4693  }
4694 
4695  private:
4696  VmaRawList<T>* m_pList;
4697  VmaListItem<T>* m_pItem;
4698 
4699  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4700  m_pList(pList),
4701  m_pItem(pItem)
4702  {
4703  }
4704 
4705  friend class VmaList<T, AllocatorT>;
4706  };
4707 
4708  class const_iterator
4709  {
4710  public:
4711  const_iterator() :
4712  m_pList(VMA_NULL),
4713  m_pItem(VMA_NULL)
4714  {
4715  }
4716 
4717  const_iterator(const iterator& src) :
4718  m_pList(src.m_pList),
4719  m_pItem(src.m_pItem)
4720  {
4721  }
4722 
4723  const T& operator*() const
4724  {
4725  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4726  return m_pItem->Value;
4727  }
4728  const T* operator->() const
4729  {
4730  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4731  return &m_pItem->Value;
4732  }
4733 
4734  const_iterator& operator++()
4735  {
4736  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4737  m_pItem = m_pItem->pNext;
4738  return *this;
4739  }
4740  const_iterator& operator--()
4741  {
4742  if(m_pItem != VMA_NULL)
4743  {
4744  m_pItem = m_pItem->pPrev;
4745  }
4746  else
4747  {
4748  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4749  m_pItem = m_pList->Back();
4750  }
4751  return *this;
4752  }
4753 
4754  const_iterator operator++(int)
4755  {
4756  const_iterator result = *this;
4757  ++*this;
4758  return result;
4759  }
4760  const_iterator operator--(int)
4761  {
4762  const_iterator result = *this;
4763  --*this;
4764  return result;
4765  }
4766 
4767  bool operator==(const const_iterator& rhs) const
4768  {
4769  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4770  return m_pItem == rhs.m_pItem;
4771  }
4772  bool operator!=(const const_iterator& rhs) const
4773  {
4774  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4775  return m_pItem != rhs.m_pItem;
4776  }
4777 
4778  private:
4779  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4780  m_pList(pList),
4781  m_pItem(pItem)
4782  {
4783  }
4784 
4785  const VmaRawList<T>* m_pList;
4786  const VmaListItem<T>* m_pItem;
4787 
4788  friend class VmaList<T, AllocatorT>;
4789  };
4790 
4791  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4792 
4793  bool empty() const { return m_RawList.IsEmpty(); }
4794  size_t size() const { return m_RawList.GetCount(); }
4795 
4796  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4797  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4798 
4799  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4800  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4801 
4802  void clear() { m_RawList.Clear(); }
4803  void push_back(const T& value) { m_RawList.PushBack(value); }
4804  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4805  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4806 
4807 private:
4808  VmaRawList<T> m_RawList;
4809 };
4810 
4811 #endif // #if VMA_USE_STL_LIST
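// A minimal sketch of using VmaList through its STL-like interface (illustrative
// only; assumes a valid `const VkAllocationCallbacks* pCallbacks`):
#if 0
VmaStlAllocator<int> alloc(pCallbacks);
VmaList<int, VmaStlAllocator<int> > list(alloc);
list.push_back(1);
list.push_back(2);
for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
{
    // *it visits 1, then 2.
}
#endif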
4812 
4814 // class VmaMap
4815 
4816 // Unused in this version.
4817 #if 0
4818 
4819 #if VMA_USE_STL_UNORDERED_MAP
4820 
4821 #define VmaPair std::pair
4822 
4823 #define VMA_MAP_TYPE(KeyT, ValueT) \
4824  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4825 
4826 #else // #if VMA_USE_STL_UNORDERED_MAP
4827 
4828 template<typename T1, typename T2>
4829 struct VmaPair
4830 {
4831  T1 first;
4832  T2 second;
4833 
4834  VmaPair() : first(), second() { }
4835  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4836 };
4837 
4838 /* Class compatible with a subset of the interface of std::unordered_map.
4839 KeyT, ValueT must be POD because they will be stored in VmaVector.
4840 */
4841 template<typename KeyT, typename ValueT>
4842 class VmaMap
4843 {
4844 public:
4845  typedef VmaPair<KeyT, ValueT> PairType;
4846  typedef PairType* iterator;
4847 
4848  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4849 
4850  iterator begin() { return m_Vector.begin(); }
4851  iterator end() { return m_Vector.end(); }
4852 
4853  void insert(const PairType& pair);
4854  iterator find(const KeyT& key);
4855  void erase(iterator it);
4856 
4857 private:
4858  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4859 };
4860 
4861 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4862 
4863 template<typename FirstT, typename SecondT>
4864 struct VmaPairFirstLess
4865 {
4866  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4867  {
4868  return lhs.first < rhs.first;
4869  }
4870  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4871  {
4872  return lhs.first < rhsFirst;
4873  }
4874 };
4875 
4876 template<typename KeyT, typename ValueT>
4877 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4878 {
4879  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4880  m_Vector.data(),
4881  m_Vector.data() + m_Vector.size(),
4882  pair,
4883  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4884  VmaVectorInsert(m_Vector, indexToInsert, pair);
4885 }
4886 
4887 template<typename KeyT, typename ValueT>
4888 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4889 {
4890  PairType* it = VmaBinaryFindFirstNotLess(
4891  m_Vector.data(),
4892  m_Vector.data() + m_Vector.size(),
4893  key,
4894  VmaPairFirstLess<KeyT, ValueT>());
4895  if((it != m_Vector.end()) && (it->first == key))
4896  {
4897  return it;
4898  }
4899  else
4900  {
4901  return m_Vector.end();
4902  }
4903 }
4904 
4905 template<typename KeyT, typename ValueT>
4906 void VmaMap<KeyT, ValueT>::erase(iterator it)
4907 {
4908  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4909 }
4910 
4911 #endif // #if VMA_USE_STL_UNORDERED_MAP
4912 
4913 #endif // #if 0
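// Although disabled in this version, VmaMap above shows a common pattern: a map
// stored as a vector kept sorted by key, with binary search for lookup and an
// ordered insert. A standalone sketch of the same idea using the standard
// library (illustrative only, not VMA code):
#if 0
#include <algorithm>
#include <utility>
#include <vector>

typedef std::pair<int, const char*> Entry;

// Compares an entry's key against a raw key, for std::lower_bound.
struct EntryKeyLess
{
    bool operator()(const Entry& lhs, int rhsKey) const { return lhs.first < rhsKey; }
};

// O(n) insert that keeps the vector sorted by key.
static void Insert(std::vector<Entry>& m, int key, const char* value)
{
    std::vector<Entry>::iterator it =
        std::lower_bound(m.begin(), m.end(), key, EntryKeyLess());
    m.insert(it, Entry(key, value));
}

// O(log n) lookup; returns null when the key is absent.
static const char* Find(const std::vector<Entry>& m, int key)
{
    std::vector<Entry>::const_iterator it =
        std::lower_bound(m.begin(), m.end(), key, EntryKeyLess());
    return (it != m.end() && it->first == key) ? it->second : 0;
}
#endif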
4914 
4916 
4917 class VmaDeviceMemoryBlock;
4918 
4919 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4920 
4921 struct VmaAllocation_T
4922 {
4923 private:
4924  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4925 
4926  enum FLAGS
4927  {
4928  FLAG_USER_DATA_STRING = 0x01,
4929  };
4930 
4931 public:
4932  enum ALLOCATION_TYPE
4933  {
4934  ALLOCATION_TYPE_NONE,
4935  ALLOCATION_TYPE_BLOCK,
4936  ALLOCATION_TYPE_DEDICATED,
4937  };
4938 
4939  /*
4940  This struct cannot have a constructor or destructor. It must be POD because it
4941  is allocated using VmaPoolAllocator.
4942  */
4943 
4944  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4945  {
4946  m_Alignment = 1;
4947  m_Size = 0;
4948  m_pUserData = VMA_NULL;
4949  m_LastUseFrameIndex = currentFrameIndex;
4950  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4951  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4952  m_MapCount = 0;
4953  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4954 
4955 #if VMA_STATS_STRING_ENABLED
4956  m_CreationFrameIndex = currentFrameIndex;
4957  m_BufferImageUsage = 0;
4958 #endif
4959  }
4960 
4961  void Dtor()
4962  {
4963  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4964 
4965  // Check if owned string was freed.
4966  VMA_ASSERT(m_pUserData == VMA_NULL);
4967  }
4968 
4969  void InitBlockAllocation(
4970  VmaDeviceMemoryBlock* block,
4971  VkDeviceSize offset,
4972  VkDeviceSize alignment,
4973  VkDeviceSize size,
4974  VmaSuballocationType suballocationType,
4975  bool mapped,
4976  bool canBecomeLost)
4977  {
4978  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4979  VMA_ASSERT(block != VMA_NULL);
4980  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4981  m_Alignment = alignment;
4982  m_Size = size;
4983  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4984  m_SuballocationType = (uint8_t)suballocationType;
4985  m_BlockAllocation.m_Block = block;
4986  m_BlockAllocation.m_Offset = offset;
4987  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4988  }
4989 
4990  void InitLost()
4991  {
4992  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4993  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4994  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4995  m_BlockAllocation.m_Block = VMA_NULL;
4996  m_BlockAllocation.m_Offset = 0;
4997  m_BlockAllocation.m_CanBecomeLost = true;
4998  }
4999 
5000  void ChangeBlockAllocation(
5001  VmaAllocator hAllocator,
5002  VmaDeviceMemoryBlock* block,
5003  VkDeviceSize offset);
5004 
5005  void ChangeSize(VkDeviceSize newSize);
5006  void ChangeOffset(VkDeviceSize newOffset);
5007 
5008  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5009  void InitDedicatedAllocation(
5010  uint32_t memoryTypeIndex,
5011  VkDeviceMemory hMemory,
5012  VmaSuballocationType suballocationType,
5013  void* pMappedData,
5014  VkDeviceSize size)
5015  {
5016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5017  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5018  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5019  m_Alignment = 0;
5020  m_Size = size;
5021  m_SuballocationType = (uint8_t)suballocationType;
5022  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5023  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5024  m_DedicatedAllocation.m_hMemory = hMemory;
5025  m_DedicatedAllocation.m_pMappedData = pMappedData;
5026  }
5027 
5028  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5029  VkDeviceSize GetAlignment() const { return m_Alignment; }
5030  VkDeviceSize GetSize() const { return m_Size; }
5031  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5032  void* GetUserData() const { return m_pUserData; }
5033  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5034  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5035 
5036  VmaDeviceMemoryBlock* GetBlock() const
5037  {
5038  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5039  return m_BlockAllocation.m_Block;
5040  }
5041  VkDeviceSize GetOffset() const;
5042  VkDeviceMemory GetMemory() const;
5043  uint32_t GetMemoryTypeIndex() const;
5044  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5045  void* GetMappedData() const;
5046  bool CanBecomeLost() const;
5047 
5048  uint32_t GetLastUseFrameIndex() const
5049  {
5050  return m_LastUseFrameIndex.load();
5051  }
5052  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5053  {
5054  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5055  }
5056  /*
5057  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5058  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5059  - Else, returns false.
5060 
5061  If hAllocation is already lost, asserts - you should not call it in that case.
5062  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
5063  */
5064  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5065 
5066  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5067  {
5068  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5069  outInfo.blockCount = 1;
5070  outInfo.allocationCount = 1;
5071  outInfo.unusedRangeCount = 0;
5072  outInfo.usedBytes = m_Size;
5073  outInfo.unusedBytes = 0;
5074  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5075  outInfo.unusedRangeSizeMin = UINT64_MAX;
5076  outInfo.unusedRangeSizeMax = 0;
5077  }
5078 
5079  void BlockAllocMap();
5080  void BlockAllocUnmap();
5081  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5082  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5083 
5084 #if VMA_STATS_STRING_ENABLED
5085  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5086  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5087 
5088  void InitBufferImageUsage(uint32_t bufferImageUsage)
5089  {
5090  VMA_ASSERT(m_BufferImageUsage == 0);
5091  m_BufferImageUsage = bufferImageUsage;
5092  }
5093 
5094  void PrintParameters(class VmaJsonWriter& json) const;
5095 #endif
5096 
5097 private:
5098  VkDeviceSize m_Alignment;
5099  VkDeviceSize m_Size;
5100  void* m_pUserData;
5101  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5102  uint8_t m_Type; // ALLOCATION_TYPE
5103  uint8_t m_SuballocationType; // VmaSuballocationType
5104  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5105  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5106  uint8_t m_MapCount;
5107  uint8_t m_Flags; // enum FLAGS
5108 
5109  // Allocation out of VmaDeviceMemoryBlock.
5110  struct BlockAllocation
5111  {
5112  VmaDeviceMemoryBlock* m_Block;
5113  VkDeviceSize m_Offset;
5114  bool m_CanBecomeLost;
5115  };
5116 
5117  // Allocation for an object that has its own private VkDeviceMemory.
5118  struct DedicatedAllocation
5119  {
5120  uint32_t m_MemoryTypeIndex;
5121  VkDeviceMemory m_hMemory;
5122  void* m_pMappedData; // Not null means memory is mapped.
5123  };
5124 
5125  union
5126  {
5127  // Allocation out of VmaDeviceMemoryBlock.
5128  BlockAllocation m_BlockAllocation;
5129  // Allocation for an object that has its own private VkDeviceMemory.
5130  DedicatedAllocation m_DedicatedAllocation;
5131  };
5132 
5133 #if VMA_STATS_STRING_ENABLED
5134  uint32_t m_CreationFrameIndex;
5135  uint32_t m_BufferImageUsage; // 0 if unknown.
5136 #endif
5137 
5138  void FreeUserDataString(VmaAllocator hAllocator);
5139 };
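
// m_MapCount above packs two pieces of state into one byte: bit 0x80 marks a
// persistently mapped allocation, and the low 7 bits count vmaMapMemory() calls.
// An illustrative decoding sketch (not part of the library):
#if 0
static bool IsPersistentlyMapped(uint8_t mapCount)
{
    return (mapCount & 0x80u) != 0; // MAP_COUNT_FLAG_PERSISTENT_MAP
}
static uint8_t UserMapRefCount(uint8_t mapCount)
{
    return (uint8_t)(mapCount & 0x7Fu); // vmaMapMemory()/vmaUnmapMemory() counter
}
#endif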
5140 
5141 /*
5142 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned
5143 as an allocated memory block, or free.
5144 */
5145 struct VmaSuballocation
5146 {
5147  VkDeviceSize offset;
5148  VkDeviceSize size;
5149  VmaAllocation hAllocation;
5150  VmaSuballocationType type;
5151 };
5152 
5153 // Comparator for offsets.
5154 struct VmaSuballocationOffsetLess
5155 {
5156  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5157  {
5158  return lhs.offset < rhs.offset;
5159  }
5160 };
5161 struct VmaSuballocationOffsetGreater
5162 {
5163  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5164  {
5165  return lhs.offset > rhs.offset;
5166  }
5167 };
5168 
5169 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5170 
5171 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5172 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5173 
5174 enum class VmaAllocationRequestType
5175 {
5176  Normal,
5177  // Used by "Linear" algorithm.
5178  UpperAddress,
5179  EndOf1st,
5180  EndOf2nd,
5181 };
5182 
5183 /*
5184 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5185 
5186 If canMakeOtherLost was false:
5187 - item points to a FREE suballocation.
5188 - itemsToMakeLostCount is 0.
5189 
5190 If canMakeOtherLost was true:
5191 - item points to first of sequence of suballocations, which are either FREE,
5192  or point to VmaAllocations that can become lost.
5193 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5194  the requested allocation to succeed.
5195 */
5196 struct VmaAllocationRequest
5197 {
5198  VkDeviceSize offset;
5199  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5200  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5201  VmaSuballocationList::iterator item;
5202  size_t itemsToMakeLostCount;
5203  void* customData;
5204  VmaAllocationRequestType type;
5205 
5206  VkDeviceSize CalcCost() const
5207  {
5208  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5209  }
5210 };
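
// Worked example (illustrative numbers): a request that would make 2 live
// allocations lost, overlapping 262144 bytes of their data (sumItemSize), has
// CalcCost() = 262144 + 2 * 1048576 = 2359296 equivalent bytes; among candidate
// placements, the one with the lowest cost is preferred.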
5211 
5212 /*
5213 Data structure used for bookkeeping of allocations and unused ranges of memory
5214 in a single VkDeviceMemory block.
5215 */
5216 class VmaBlockMetadata
5217 {
5218 public:
5219  VmaBlockMetadata(VmaAllocator hAllocator);
5220  virtual ~VmaBlockMetadata() { }
5221  virtual void Init(VkDeviceSize size) { m_Size = size; }
5222 
5223  // Validates all data structures inside this object. If not valid, returns false.
5224  virtual bool Validate() const = 0;
5225  VkDeviceSize GetSize() const { return m_Size; }
5226  virtual size_t GetAllocationCount() const = 0;
5227  virtual VkDeviceSize GetSumFreeSize() const = 0;
5228  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5229  // Returns true if this block is empty - contains only a single free suballocation.
5230  virtual bool IsEmpty() const = 0;
5231 
5232  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5233  // Shouldn't modify blockCount.
5234  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5235 
5236 #if VMA_STATS_STRING_ENABLED
5237  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5238 #endif
5239 
5240  // Tries to find a place for suballocation with given parameters inside this block.
5241  // If succeeded, fills pAllocationRequest and returns true.
5242  // If failed, returns false.
5243  virtual bool CreateAllocationRequest(
5244  uint32_t currentFrameIndex,
5245  uint32_t frameInUseCount,
5246  VkDeviceSize bufferImageGranularity,
5247  VkDeviceSize allocSize,
5248  VkDeviceSize allocAlignment,
5249  bool upperAddress,
5250  VmaSuballocationType allocType,
5251  bool canMakeOtherLost,
5252  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5253  uint32_t strategy,
5254  VmaAllocationRequest* pAllocationRequest) = 0;
5255 
5256  virtual bool MakeRequestedAllocationsLost(
5257  uint32_t currentFrameIndex,
5258  uint32_t frameInUseCount,
5259  VmaAllocationRequest* pAllocationRequest) = 0;
5260 
5261  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5262 
5263  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5264 
5265  // Makes actual allocation based on request. Request must already be checked and valid.
5266  virtual void Alloc(
5267  const VmaAllocationRequest& request,
5268  VmaSuballocationType type,
5269  VkDeviceSize allocSize,
5270  VmaAllocation hAllocation) = 0;
5271 
5272  // Frees suballocation assigned to given memory region.
5273  virtual void Free(const VmaAllocation allocation) = 0;
5274  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5275 
5276  // Tries to resize (grow or shrink) space for given allocation, in place.
5277  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5278 
5279 protected:
5280  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5281 
5282 #if VMA_STATS_STRING_ENABLED
5283  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5284  VkDeviceSize unusedBytes,
5285  size_t allocationCount,
5286  size_t unusedRangeCount) const;
5287  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5288  VkDeviceSize offset,
5289  VmaAllocation hAllocation) const;
5290  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5291  VkDeviceSize offset,
5292  VkDeviceSize size) const;
5293  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5294 #endif
5295 
5296 private:
5297  VkDeviceSize m_Size;
5298  const VkAllocationCallbacks* m_pAllocationCallbacks;
5299 };
5300 
5301 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5302  VMA_ASSERT(0 && "Validation failed: " #cond); \
5303  return false; \
5304  } } while(false)
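
// A sketch of how VMA_VALIDATE is intended to be used inside a Validate()
// implementation (illustrative only, not taken from this file):
#if 0
bool ExampleValidate(size_t itemCount, size_t freeCount)
{
    // Each failing condition asserts (in debug builds) and returns false.
    VMA_VALIDATE(freeCount <= itemCount);
    VMA_VALIDATE(itemCount > 0);
    return true;
}
#endif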
5305 
5306 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5307 {
5308  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5309 public:
5310  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5311  virtual ~VmaBlockMetadata_Generic();
5312  virtual void Init(VkDeviceSize size);
5313 
5314  virtual bool Validate() const;
5315  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5316  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5317  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5318  virtual bool IsEmpty() const;
5319 
5320  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5321  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5322 
5323 #if VMA_STATS_STRING_ENABLED
5324  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5325 #endif
5326 
5327  virtual bool CreateAllocationRequest(
5328  uint32_t currentFrameIndex,
5329  uint32_t frameInUseCount,
5330  VkDeviceSize bufferImageGranularity,
5331  VkDeviceSize allocSize,
5332  VkDeviceSize allocAlignment,
5333  bool upperAddress,
5334  VmaSuballocationType allocType,
5335  bool canMakeOtherLost,
5336  uint32_t strategy,
5337  VmaAllocationRequest* pAllocationRequest);
5338 
5339  virtual bool MakeRequestedAllocationsLost(
5340  uint32_t currentFrameIndex,
5341  uint32_t frameInUseCount,
5342  VmaAllocationRequest* pAllocationRequest);
5343 
5344  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5345 
5346  virtual VkResult CheckCorruption(const void* pBlockData);
5347 
5348  virtual void Alloc(
5349  const VmaAllocationRequest& request,
5350  VmaSuballocationType type,
5351  VkDeviceSize allocSize,
5352  VmaAllocation hAllocation);
5353 
5354  virtual void Free(const VmaAllocation allocation);
5355  virtual void FreeAtOffset(VkDeviceSize offset);
5356 
5357  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5358 
5360  // For defragmentation
5361 
5362  bool IsBufferImageGranularityConflictPossible(
5363  VkDeviceSize bufferImageGranularity,
5364  VmaSuballocationType& inOutPrevSuballocType) const;
5365 
5366 private:
5367  friend class VmaDefragmentationAlgorithm_Generic;
5368  friend class VmaDefragmentationAlgorithm_Fast;
5369 
5370  uint32_t m_FreeCount;
5371  VkDeviceSize m_SumFreeSize;
5372  VmaSuballocationList m_Suballocations;
5373  // Suballocations that are free and have size greater than a certain threshold.
5374  // Sorted by size, ascending.
5375  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5376 
5377  bool ValidateFreeSuballocationList() const;
5378 
5379  // Checks if a requested suballocation with the given parameters can be placed at the given free suballocation item (suballocItem).
5380  // If yes, fills pOffset and returns true. If no, returns false.
5381  bool CheckAllocation(
5382  uint32_t currentFrameIndex,
5383  uint32_t frameInUseCount,
5384  VkDeviceSize bufferImageGranularity,
5385  VkDeviceSize allocSize,
5386  VkDeviceSize allocAlignment,
5387  VmaSuballocationType allocType,
5388  VmaSuballocationList::const_iterator suballocItem,
5389  bool canMakeOtherLost,
5390  VkDeviceSize* pOffset,
5391  size_t* itemsToMakeLostCount,
5392  VkDeviceSize* pSumFreeSize,
5393  VkDeviceSize* pSumItemSize) const;
5394  // Given a free suballocation, merges it with the following one, which must also be free.
5395  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5396  // Releases given suballocation, making it free.
5397  // Merges it with adjacent free suballocations if applicable.
5398  // Returns iterator to new free suballocation at this place.
5399  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5400  // Given a free suballocation, inserts it into the sorted list
5401  // m_FreeSuballocationsBySize if it is large enough to qualify.
5402  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5403  // Given a free suballocation, removes it from the sorted list
5404  // m_FreeSuballocationsBySize if it is registered there.
5405  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5406 };
5407 
5408 /*
5409 Allocations and their references in the internal data structure look like this:
5410 
5411 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5412 
5413  0 +-------+
5414  | |
5415  | |
5416  | |
5417  +-------+
5418  | Alloc | 1st[m_1stNullItemsBeginCount]
5419  +-------+
5420  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5421  +-------+
5422  | ... |
5423  +-------+
5424  | Alloc | 1st[1st.size() - 1]
5425  +-------+
5426  | |
5427  | |
5428  | |
5429 GetSize() +-------+
5430 
5431 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5432 
5433  0 +-------+
5434  | Alloc | 2nd[0]
5435  +-------+
5436  | Alloc | 2nd[1]
5437  +-------+
5438  | ... |
5439  +-------+
5440  | Alloc | 2nd[2nd.size() - 1]
5441  +-------+
5442  | |
5443  | |
5444  | |
5445  +-------+
5446  | Alloc | 1st[m_1stNullItemsBeginCount]
5447  +-------+
5448  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5449  +-------+
5450  | ... |
5451  +-------+
5452  | Alloc | 1st[1st.size() - 1]
5453  +-------+
5454  | |
5455 GetSize() +-------+
5456 
5457 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5458 
5459  0 +-------+
5460  | |
5461  | |
5462  | |
5463  +-------+
5464  | Alloc | 1st[m_1stNullItemsBeginCount]
5465  +-------+
5466  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5467  +-------+
5468  | ... |
5469  +-------+
5470  | Alloc | 1st[1st.size() - 1]
5471  +-------+
5472  | |
5473  | |
5474  | |
5475  +-------+
5476  | Alloc | 2nd[2nd.size() - 1]
5477  +-------+
5478  | ... |
5479  +-------+
5480  | Alloc | 2nd[1]
5481  +-------+
5482  | Alloc | 2nd[0]
5483 GetSize() +-------+
5484 
5485 */
5486 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5487 {
5488  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5489 public:
5490  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5491  virtual ~VmaBlockMetadata_Linear();
5492  virtual void Init(VkDeviceSize size);
5493 
5494  virtual bool Validate() const;
5495  virtual size_t GetAllocationCount() const;
5496  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5497  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5498  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5499 
5500  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5501  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5502 
5503 #if VMA_STATS_STRING_ENABLED
5504  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5505 #endif
5506 
5507  virtual bool CreateAllocationRequest(
5508  uint32_t currentFrameIndex,
5509  uint32_t frameInUseCount,
5510  VkDeviceSize bufferImageGranularity,
5511  VkDeviceSize allocSize,
5512  VkDeviceSize allocAlignment,
5513  bool upperAddress,
5514  VmaSuballocationType allocType,
5515  bool canMakeOtherLost,
5516  uint32_t strategy,
5517  VmaAllocationRequest* pAllocationRequest);
5518 
5519  virtual bool MakeRequestedAllocationsLost(
5520  uint32_t currentFrameIndex,
5521  uint32_t frameInUseCount,
5522  VmaAllocationRequest* pAllocationRequest);
5523 
5524  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5525 
5526  virtual VkResult CheckCorruption(const void* pBlockData);
5527 
5528  virtual void Alloc(
5529  const VmaAllocationRequest& request,
5530  VmaSuballocationType type,
5531  VkDeviceSize allocSize,
5532  VmaAllocation hAllocation);
5533 
5534  virtual void Free(const VmaAllocation allocation);
5535  virtual void FreeAtOffset(VkDeviceSize offset);
5536 
5537 private:
5538  /*
5539  There are two suballocation vectors, used in ping-pong way.
5540  The one with index m_1stVectorIndex is called 1st.
5541  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5542  2nd can be non-empty only when 1st is not empty.
5543  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5544  */
5545  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5546 
5547  enum SECOND_VECTOR_MODE
5548  {
5549  SECOND_VECTOR_EMPTY,
5550  /*
5551  Suballocations in the 2nd vector are created later than the ones in the 1st,
5552  but they all have smaller offsets.
5553  */
5554  SECOND_VECTOR_RING_BUFFER,
5555  /*
5556  Suballocations in 2nd vector are upper side of double stack.
5557  They all have offsets higher than those in 1st vector.
5558  Top of this stack means smaller offsets, but higher indices in this vector.
5559  */
5560  SECOND_VECTOR_DOUBLE_STACK,
5561  };
5562 
5563  VkDeviceSize m_SumFreeSize;
5564  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5565  uint32_t m_1stVectorIndex;
5566  SECOND_VECTOR_MODE m_2ndVectorMode;
5567 
5568  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5569  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5570  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5571  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5572 
5573  // Number of items in 1st vector with hAllocation = null at the beginning.
5574  size_t m_1stNullItemsBeginCount;
5575  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5576  size_t m_1stNullItemsMiddleCount;
5577  // Number of items in 2nd vector with hAllocation = null.
5578  size_t m_2ndNullItemsCount;
5579 
5580  bool ShouldCompact1st() const;
5581  void CleanupAfterFree();
5582 
5583  bool CreateAllocationRequest_LowerAddress(
5584  uint32_t currentFrameIndex,
5585  uint32_t frameInUseCount,
5586  VkDeviceSize bufferImageGranularity,
5587  VkDeviceSize allocSize,
5588  VkDeviceSize allocAlignment,
5589  VmaSuballocationType allocType,
5590  bool canMakeOtherLost,
5591  uint32_t strategy,
5592  VmaAllocationRequest* pAllocationRequest);
5593  bool CreateAllocationRequest_UpperAddress(
5594  uint32_t currentFrameIndex,
5595  uint32_t frameInUseCount,
5596  VkDeviceSize bufferImageGranularity,
5597  VkDeviceSize allocSize,
5598  VkDeviceSize allocAlignment,
5599  VmaSuballocationType allocType,
5600  bool canMakeOtherLost,
5601  uint32_t strategy,
5602  VmaAllocationRequest* pAllocationRequest);
5603 };
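
// The 1st/2nd roles of m_Suballocations0/1 above are swapped simply by flipping
// m_1stVectorIndex, e.g. so cleanup can rebuild into the other vector and swap
// in O(1). An illustrative sketch of the ping-pong selection (not part of the
// library):
#if 0
uint32_t firstIndex = 0; // plays the role of m_1stVectorIndex
SuballocationVectorType* const vectors[2] = { &suballocs0, &suballocs1 }; // assumed
SuballocationVectorType& first  = *vectors[firstIndex];      // "1st"
SuballocationVectorType& second = *vectors[firstIndex ^ 1];  // "2nd"
firstIndex ^= 1; // swap roles: the old 2nd becomes the new 1st
#endif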
5604 
5605 /*
5606 - GetSize() is the original size of allocated memory block.
5607 - m_UsableSize is this size aligned down to a power of two.
5608  All allocations and calculations happen relative to m_UsableSize.
5609 - GetUnusableSize() is the difference between them.
5610  It is reported as a separate, unused range, not available for allocations.
5611 
5612 Node at level 0 has size = m_UsableSize.
5613 Each next level contains nodes with size 2 times smaller than current level.
5614 m_LevelCount is the maximum number of levels to use in the current object.
5615 */
5616 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5617 {
5618  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5619 public:
5620  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5621  virtual ~VmaBlockMetadata_Buddy();
5622  virtual void Init(VkDeviceSize size);
5623 
5624  virtual bool Validate() const;
5625  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5626  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5627  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5628  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5629 
5630  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5631  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5632 
5633 #if VMA_STATS_STRING_ENABLED
5634  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5635 #endif
5636 
5637  virtual bool CreateAllocationRequest(
5638  uint32_t currentFrameIndex,
5639  uint32_t frameInUseCount,
5640  VkDeviceSize bufferImageGranularity,
5641  VkDeviceSize allocSize,
5642  VkDeviceSize allocAlignment,
5643  bool upperAddress,
5644  VmaSuballocationType allocType,
5645  bool canMakeOtherLost,
5646  uint32_t strategy,
5647  VmaAllocationRequest* pAllocationRequest);
5648 
5649  virtual bool MakeRequestedAllocationsLost(
5650  uint32_t currentFrameIndex,
5651  uint32_t frameInUseCount,
5652  VmaAllocationRequest* pAllocationRequest);
5653 
5654  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5655 
5656  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5657 
5658  virtual void Alloc(
5659  const VmaAllocationRequest& request,
5660  VmaSuballocationType type,
5661  VkDeviceSize allocSize,
5662  VmaAllocation hAllocation);
5663 
5664  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5665  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5666 
5667 private:
5668  static const VkDeviceSize MIN_NODE_SIZE = 32;
5669  static const size_t MAX_LEVELS = 30;
5670 
5671  struct ValidationContext
5672  {
5673  size_t calculatedAllocationCount;
5674  size_t calculatedFreeCount;
5675  VkDeviceSize calculatedSumFreeSize;
5676 
5677  ValidationContext() :
5678  calculatedAllocationCount(0),
5679  calculatedFreeCount(0),
5680  calculatedSumFreeSize(0) { }
5681  };
5682 
5683  struct Node
5684  {
5685  VkDeviceSize offset;
5686  enum TYPE
5687  {
5688  TYPE_FREE,
5689  TYPE_ALLOCATION,
5690  TYPE_SPLIT,
5691  TYPE_COUNT
5692  } type;
5693  Node* parent;
5694  Node* buddy;
5695 
5696  union
5697  {
5698  struct
5699  {
5700  Node* prev;
5701  Node* next;
5702  } free;
5703  struct
5704  {
5705  VmaAllocation alloc;
5706  } allocation;
5707  struct
5708  {
5709  Node* leftChild;
5710  } split;
5711  };
5712  };
5713 
5714  // Size of the memory block aligned down to a power of two.
5715  VkDeviceSize m_UsableSize;
5716  uint32_t m_LevelCount;
5717 
5718  Node* m_Root;
5719  struct {
5720  Node* front;
5721  Node* back;
5722  } m_FreeList[MAX_LEVELS];
5723  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5724  size_t m_AllocationCount;
5725  // Number of nodes in the tree with type == TYPE_FREE.
5726  size_t m_FreeCount;
5727  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5728  VkDeviceSize m_SumFreeSize;
5729 
5730  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5731  void DeleteNode(Node* node);
5732  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5733  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5734  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5735  // Alloc passed just for validation. Can be null.
5736  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5737  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5738  // Adds node to the front of FreeList at given level.
5739  // node->type must be FREE.
5740  // node->free.prev, next can be undefined.
5741  void AddToFreeListFront(uint32_t level, Node* node);
5742  // Removes node from FreeList at given level.
5743  // node->type must be FREE.
5744  // node->free.prev, next stay untouched.
5745  void RemoveFromFreeList(uint32_t level, Node* node);
5746 
5747 #if VMA_STATS_STRING_ENABLED
5748  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5749 #endif
5750 };
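
// Worked example of the level arithmetic above (illustrative numbers): for a
// 100 MiB block, m_UsableSize is aligned down to 64 MiB, so LevelToNodeSize(0)
// = 64 MiB and LevelToNodeSize(3) = 64 MiB >> 3 = 8 MiB; a 5 MiB allocation is
// served from the deepest level whose node size still fits it (the 8 MiB
// level), and the remaining 36 MiB is the unusable size, reported as a
// permanently unused range. A sketch of that relationship (illustrative, not
// the library's exact AllocSizeToLevel()):
#if 0
static uint32_t LevelForAllocSize(VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    // Descend while the next, smaller node size would still fit the allocation.
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
    {
        ++level;
    }
    return level;
}
#endif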
5751 
5752 /*
5753 Represents a single block of device memory (`VkDeviceMemory`) with all the
5754 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5755 
5756 Thread-safety: This class must be externally synchronized.
5757 */
5758 class VmaDeviceMemoryBlock
5759 {
5760  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5761 public:
5762  VmaBlockMetadata* m_pMetadata;
5763 
5764  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5765 
5766  ~VmaDeviceMemoryBlock()
5767  {
5768  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5769  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5770  }
5771 
5772  // Always call after construction.
5773  void Init(
5774  VmaAllocator hAllocator,
5775  VmaPool hParentPool,
5776  uint32_t newMemoryTypeIndex,
5777  VkDeviceMemory newMemory,
5778  VkDeviceSize newSize,
5779  uint32_t id,
5780  uint32_t algorithm);
5781  // Always call before destruction.
5782  void Destroy(VmaAllocator allocator);
5783 
5784  VmaPool GetParentPool() const { return m_hParentPool; }
5785  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5786  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5787  uint32_t GetId() const { return m_Id; }
5788  void* GetMappedData() const { return m_pMappedData; }
5789 
5790  // Validates all data structures inside this object. If not valid, returns false.
5791  bool Validate() const;
5792 
5793  VkResult CheckCorruption(VmaAllocator hAllocator);
5794 
5795  // ppData can be null.
5796  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5797  void Unmap(VmaAllocator hAllocator, uint32_t count);
5798 
5799  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5800  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5801 
5802  VkResult BindBufferMemory(
5803  const VmaAllocator hAllocator,
5804  const VmaAllocation hAllocation,
5805  VkBuffer hBuffer);
5806  VkResult BindImageMemory(
5807  const VmaAllocator hAllocator,
5808  const VmaAllocation hAllocation,
5809  VkImage hImage);
5810 
5811 private:
5812  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
5813  uint32_t m_MemoryTypeIndex;
5814  uint32_t m_Id;
5815  VkDeviceMemory m_hMemory;
5816 
5817  /*
5818  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5819  Also protects m_MapCount, m_pMappedData.
5820  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5821  */
5822  VMA_MUTEX m_Mutex;
5823  uint32_t m_MapCount;
5824  void* m_pMappedData;
5825 };
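
// Map()/Unmap() above are reference-counted: `count` adds to or subtracts from
// m_MapCount, and the actual vkMapMemory/vkUnmapMemory calls happen only on the
// 0 -> 1 and 1 -> 0 transitions. A hedged usage sketch (assumes a valid `block`
// and `hAllocator`; illustrative only):
#if 0
void* pData1 = VMA_NULL;
void* pData2 = VMA_NULL;
block.Map(hAllocator, 1, &pData1); // calls vkMapMemory, m_MapCount becomes 1
block.Map(hAllocator, 1, &pData2); // no Vulkan call, same pointer, m_MapCount 2
block.Unmap(hAllocator, 1);        // m_MapCount back to 1, memory stays mapped
block.Unmap(hAllocator, 1);        // calls vkUnmapMemory, m_MapCount becomes 0
#endif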
5826 
5827 struct VmaPointerLess
5828 {
5829  bool operator()(const void* lhs, const void* rhs) const
5830  {
5831  return lhs < rhs;
5832  }
5833 };
5834 
5835 struct VmaDefragmentationMove
5836 {
5837  size_t srcBlockIndex;
5838  size_t dstBlockIndex;
5839  VkDeviceSize srcOffset;
5840  VkDeviceSize dstOffset;
5841  VkDeviceSize size;
5842 };
5843 
5844 class VmaDefragmentationAlgorithm;
5845 
5846 /*
5847 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5848 Vulkan memory type.
5849 
5850 Synchronized internally with a mutex.
5851 */
5852 struct VmaBlockVector
5853 {
5854  VMA_CLASS_NO_COPY(VmaBlockVector)
5855 public:
5856  VmaBlockVector(
5857  VmaAllocator hAllocator,
5858  VmaPool hParentPool,
5859  uint32_t memoryTypeIndex,
5860  VkDeviceSize preferredBlockSize,
5861  size_t minBlockCount,
5862  size_t maxBlockCount,
5863  VkDeviceSize bufferImageGranularity,
5864  uint32_t frameInUseCount,
5865  bool isCustomPool,
5866  bool explicitBlockSize,
5867  uint32_t algorithm);
5868  ~VmaBlockVector();
5869 
5870  VkResult CreateMinBlocks();
5871 
5872  VmaPool GetParentPool() const { return m_hParentPool; }
5873  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5874  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5875  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5876  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5877  uint32_t GetAlgorithm() const { return m_Algorithm; }
5878 
5879  void GetPoolStats(VmaPoolStats* pStats);
5880 
5881  bool IsEmpty() const { return m_Blocks.empty(); }
5882  bool IsCorruptionDetectionEnabled() const;
5883 
5884  VkResult Allocate(
5885  uint32_t currentFrameIndex,
5886  VkDeviceSize size,
5887  VkDeviceSize alignment,
5888  const VmaAllocationCreateInfo& createInfo,
5889  VmaSuballocationType suballocType,
5890  size_t allocationCount,
5891  VmaAllocation* pAllocations);
5892 
5893  void Free(
5894  VmaAllocation hAllocation);
5895 
5896  // Adds statistics of this BlockVector to pStats.
5897  void AddStats(VmaStats* pStats);
5898 
5899 #if VMA_STATS_STRING_ENABLED
5900  void PrintDetailedMap(class VmaJsonWriter& json);
5901 #endif
5902 
5903  void MakePoolAllocationsLost(
5904  uint32_t currentFrameIndex,
5905  size_t* pLostAllocationCount);
5906  VkResult CheckCorruption();
5907 
5908  // Saves results in pCtx->res.
5909  void Defragment(
5910  class VmaBlockVectorDefragmentationContext* pCtx,
5911  VmaDefragmentationStats* pStats,
5912  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5913  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5914  VkCommandBuffer commandBuffer);
5915  void DefragmentationEnd(
5916  class VmaBlockVectorDefragmentationContext* pCtx,
5917  VmaDefragmentationStats* pStats);
5918 
5920  // To be used only while the m_Mutex is locked. Used during defragmentation.
5921 
5922  size_t GetBlockCount() const { return m_Blocks.size(); }
5923  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5924  size_t CalcAllocationCount() const;
5925  bool IsBufferImageGranularityConflictPossible() const;
5926 
5927 private:
5928  friend class VmaDefragmentationAlgorithm_Generic;
5929 
5930  const VmaAllocator m_hAllocator;
5931  const VmaPool m_hParentPool;
5932  const uint32_t m_MemoryTypeIndex;
5933  const VkDeviceSize m_PreferredBlockSize;
5934  const size_t m_MinBlockCount;
5935  const size_t m_MaxBlockCount;
5936  const VkDeviceSize m_BufferImageGranularity;
5937  const uint32_t m_FrameInUseCount;
5938  const bool m_IsCustomPool;
5939  const bool m_ExplicitBlockSize;
5940  const uint32_t m_Algorithm;
5941  /* There can be at most one block that is completely empty - a hysteresis to
5942  avoid the pessimistic case of alternating creation and destruction of a
5943  VkDeviceMemory. */
5944  bool m_HasEmptyBlock;
5945  VMA_RW_MUTEX m_Mutex;
5946  // Incrementally sorted by sumFreeSize, ascending.
5947  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5948  uint32_t m_NextBlockId;
5949 
5950  VkDeviceSize CalcMaxBlockSize() const;
5951 
5952  // Finds and removes given block from vector.
5953  void Remove(VmaDeviceMemoryBlock* pBlock);
5954 
5955  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5956  // after this call.
5957  void IncrementallySortBlocks();
5958 
5959  VkResult AllocatePage(
5960  uint32_t currentFrameIndex,
5961  VkDeviceSize size,
5962  VkDeviceSize alignment,
5963  const VmaAllocationCreateInfo& createInfo,
5964  VmaSuballocationType suballocType,
5965  VmaAllocation* pAllocation);
5966 
5967  // To be used only without CAN_MAKE_OTHER_LOST flag.
5968  VkResult AllocateFromBlock(
5969  VmaDeviceMemoryBlock* pBlock,
5970  uint32_t currentFrameIndex,
5971  VkDeviceSize size,
5972  VkDeviceSize alignment,
5973  VmaAllocationCreateFlags allocFlags,
5974  void* pUserData,
5975  VmaSuballocationType suballocType,
5976  uint32_t strategy,
5977  VmaAllocation* pAllocation);
5978 
5979  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5980 
5981  // Saves result to pCtx->res.
5982  void ApplyDefragmentationMovesCpu(
5983  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5984  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5985  // Saves result to pCtx->res.
5986  void ApplyDefragmentationMovesGpu(
5987  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5988  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5989  VkCommandBuffer commandBuffer);
5990 
5991  /*
5992  Used during defragmentation. pDefragmentationStats is optional; it is an
5993  in/out parameter, updated with new data.
5994  */
5995  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5996 };
5997 
5998 struct VmaPool_T
5999 {
6000  VMA_CLASS_NO_COPY(VmaPool_T)
6001 public:
6002  VmaBlockVector m_BlockVector;
6003 
6004  VmaPool_T(
6005  VmaAllocator hAllocator,
6006  const VmaPoolCreateInfo& createInfo,
6007  VkDeviceSize preferredBlockSize);
6008  ~VmaPool_T();
6009 
6010  uint32_t GetId() const { return m_Id; }
6011  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6012 
6013 #if VMA_STATS_STRING_ENABLED
6014  //void PrintDetailedMap(class VmaStringBuilder& sb);
6015 #endif
6016 
6017 private:
6018  uint32_t m_Id;
6019 };
6020 
6021 /*
6022 Performs defragmentation:
6023 
6024 - Updates `pBlockVector->m_pMetadata`.
6025 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6026 - Does not move actual data, only returns requested moves as `moves`.
6027 */
6028 class VmaDefragmentationAlgorithm
6029 {
6030  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6031 public:
6032  VmaDefragmentationAlgorithm(
6033  VmaAllocator hAllocator,
6034  VmaBlockVector* pBlockVector,
6035  uint32_t currentFrameIndex) :
6036  m_hAllocator(hAllocator),
6037  m_pBlockVector(pBlockVector),
6038  m_CurrentFrameIndex(currentFrameIndex)
6039  {
6040  }
6041  virtual ~VmaDefragmentationAlgorithm()
6042  {
6043  }
6044 
6045  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6046  virtual void AddAll() = 0;
6047 
6048  virtual VkResult Defragment(
6049  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6050  VkDeviceSize maxBytesToMove,
6051  uint32_t maxAllocationsToMove) = 0;
6052 
6053  virtual VkDeviceSize GetBytesMoved() const = 0;
6054  virtual uint32_t GetAllocationsMoved() const = 0;
6055 
6056 protected:
6057  VmaAllocator const m_hAllocator;
6058  VmaBlockVector* const m_pBlockVector;
6059  const uint32_t m_CurrentFrameIndex;
6060 
6061  struct AllocationInfo
6062  {
6063  VmaAllocation m_hAllocation;
6064  VkBool32* m_pChanged;
6065 
6066  AllocationInfo() :
6067  m_hAllocation(VK_NULL_HANDLE),
6068  m_pChanged(VMA_NULL)
6069  {
6070  }
6071  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6072  m_hAllocation(hAlloc),
6073  m_pChanged(pChanged)
6074  {
6075  }
6076  };
6077 };
6078 
6079 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6080 {
6081  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6082 public:
6083  VmaDefragmentationAlgorithm_Generic(
6084  VmaAllocator hAllocator,
6085  VmaBlockVector* pBlockVector,
6086  uint32_t currentFrameIndex,
6087  bool overlappingMoveSupported);
6088  virtual ~VmaDefragmentationAlgorithm_Generic();
6089 
6090  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6091  virtual void AddAll() { m_AllAllocations = true; }
6092 
6093  virtual VkResult Defragment(
6094  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6095  VkDeviceSize maxBytesToMove,
6096  uint32_t maxAllocationsToMove);
6097 
6098  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6099  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6100 
6101 private:
6102  uint32_t m_AllocationCount;
6103  bool m_AllAllocations;
6104 
6105  VkDeviceSize m_BytesMoved;
6106  uint32_t m_AllocationsMoved;
6107 
6108  struct AllocationInfoSizeGreater
6109  {
6110  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6111  {
6112  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6113  }
6114  };
6115 
6116  struct AllocationInfoOffsetGreater
6117  {
6118  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6119  {
6120  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6121  }
6122  };
6123 
6124  struct BlockInfo
6125  {
6126  size_t m_OriginalBlockIndex;
6127  VmaDeviceMemoryBlock* m_pBlock;
6128  bool m_HasNonMovableAllocations;
6129  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6130 
6131  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6132  m_OriginalBlockIndex(SIZE_MAX),
6133  m_pBlock(VMA_NULL),
6134  m_HasNonMovableAllocations(true),
6135  m_Allocations(pAllocationCallbacks)
6136  {
6137  }
6138 
6139  void CalcHasNonMovableAllocations()
6140  {
6141  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6142  const size_t defragmentAllocCount = m_Allocations.size();
6143  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6144  }
6145 
6146  void SortAllocationsBySizeDescending()
6147  {
6148  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6149  }
6150 
6151  void SortAllocationsByOffsetDescending()
6152  {
6153  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6154  }
6155  };
6156 
6157  struct BlockPointerLess
6158  {
6159  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6160  {
6161  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6162  }
6163  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6164  {
6165  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6166  }
6167  };
6168 
6169  // 1. Blocks with some non-movable allocations go first.
6170  // 2. Blocks with smaller sumFreeSize go first.
6171  struct BlockInfoCompareMoveDestination
6172  {
6173  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6174  {
6175  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6176  {
6177  return true;
6178  }
6179  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6180  {
6181  return false;
6182  }
6183  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6184  {
6185  return true;
6186  }
6187  return false;
6188  }
6189  };
6190 
6191  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6192  BlockInfoVector m_Blocks;
6193 
6194  VkResult DefragmentRound(
6195  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6196  VkDeviceSize maxBytesToMove,
6197  uint32_t maxAllocationsToMove);
6198 
6199  size_t CalcBlocksWithNonMovableCount() const;
6200 
6201  static bool MoveMakesSense(
6202  size_t dstBlockIndex, VkDeviceSize dstOffset,
6203  size_t srcBlockIndex, VkDeviceSize srcOffset);
6204 };
6205 
6206 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6207 {
6208  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6209 public:
6210  VmaDefragmentationAlgorithm_Fast(
6211  VmaAllocator hAllocator,
6212  VmaBlockVector* pBlockVector,
6213  uint32_t currentFrameIndex,
6214  bool overlappingMoveSupported);
6215  virtual ~VmaDefragmentationAlgorithm_Fast();
6216 
6217  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6218  virtual void AddAll() { m_AllAllocations = true; }
6219 
6220  virtual VkResult Defragment(
6221  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6222  VkDeviceSize maxBytesToMove,
6223  uint32_t maxAllocationsToMove);
6224 
6225  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6226  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6227 
6228 private:
6229  struct BlockInfo
6230  {
6231  size_t origBlockIndex;
6232  };
6233 
6234  class FreeSpaceDatabase
6235  {
6236  public:
6237  FreeSpaceDatabase()
6238  {
6239  FreeSpace s = {};
6240  s.blockInfoIndex = SIZE_MAX;
6241  for(size_t i = 0; i < MAX_COUNT; ++i)
6242  {
6243  m_FreeSpaces[i] = s;
6244  }
6245  }
6246 
6247  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6248  {
6249  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6250  {
6251  return;
6252  }
6253 
6254  // Find the first invalid structure or, failing that, the smallest one.
6255  size_t bestIndex = SIZE_MAX;
6256  for(size_t i = 0; i < MAX_COUNT; ++i)
6257  {
6258  // Empty structure.
6259  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6260  {
6261  bestIndex = i;
6262  break;
6263  }
6264  if(m_FreeSpaces[i].size < size &&
6265  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6266  {
6267  bestIndex = i;
6268  }
6269  }
6270 
6271  if(bestIndex != SIZE_MAX)
6272  {
6273  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6274  m_FreeSpaces[bestIndex].offset = offset;
6275  m_FreeSpaces[bestIndex].size = size;
6276  }
6277  }
6278 
6279  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6280  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6281  {
6282  size_t bestIndex = SIZE_MAX;
6283  VkDeviceSize bestFreeSpaceAfter = 0;
6284  for(size_t i = 0; i < MAX_COUNT; ++i)
6285  {
6286  // Structure is valid.
6287  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6288  {
6289  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6290  // Allocation fits into this structure.
6291  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6292  {
6293  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6294  (dstOffset + size);
6295  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6296  {
6297  bestIndex = i;
6298  bestFreeSpaceAfter = freeSpaceAfter;
6299  }
6300  }
6301  }
6302  }
6303 
6304  if(bestIndex != SIZE_MAX)
6305  {
6306  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6307  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6308 
6309  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6310  {
6311  // Leave this structure for remaining empty space.
6312  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6313  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6314  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6315  }
6316  else
6317  {
6318  // This structure becomes invalid.
6319  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6320  }
6321 
6322  return true;
6323  }
6324 
6325  return false;
6326  }
6327 
6328  private:
6329  static const size_t MAX_COUNT = 4;
6330 
6331  struct FreeSpace
6332  {
6333  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6334  VkDeviceSize offset;
6335  VkDeviceSize size;
6336  } m_FreeSpaces[MAX_COUNT];
6337  };
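
  // Worked example for Fetch() above (illustrative numbers): a registered free
  // space at offset 100 with size 200, queried with alignment 64 and size 150,
  // gives dstOffset = VmaAlignUp(100, 64) = 128. The request fits because
  // 128 + 150 = 278 <= 100 + 200 = 300, leaving freeSpaceAfter = 300 - 278 = 22
  // bytes; the entry is then either shrunk to that remainder or invalidated,
  // depending on how 22 compares with VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER.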
6338 
6339  const bool m_OverlappingMoveSupported;
6340 
6341  uint32_t m_AllocationCount;
6342  bool m_AllAllocations;
6343 
6344  VkDeviceSize m_BytesMoved;
6345  uint32_t m_AllocationsMoved;
6346 
6347  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6348 
6349  void PreprocessMetadata();
6350  void PostprocessMetadata();
6351  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6352 };
6353 
6354 struct VmaBlockDefragmentationContext
6355 {
6356  enum BLOCK_FLAG
6357  {
6358  BLOCK_FLAG_USED = 0x00000001,
6359  };
6360  uint32_t flags;
6361  VkBuffer hBuffer;
6362 
6363  VmaBlockDefragmentationContext() :
6364  flags(0),
6365  hBuffer(VK_NULL_HANDLE)
6366  {
6367  }
6368 };
6369 
6370 class VmaBlockVectorDefragmentationContext
6371 {
6372  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6373 public:
6374  VkResult res;
6375  bool mutexLocked;
6376  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6377 
6378  VmaBlockVectorDefragmentationContext(
6379  VmaAllocator hAllocator,
6380  VmaPool hCustomPool, // Optional.
6381  VmaBlockVector* pBlockVector,
6382  uint32_t currFrameIndex,
6383  uint32_t flags);
6384  ~VmaBlockVectorDefragmentationContext();
6385 
6386  VmaPool GetCustomPool() const { return m_hCustomPool; }
6387  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6388  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6389 
6390  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6391  void AddAll() { m_AllAllocations = true; }
6392 
6393  void Begin(bool overlappingMoveSupported);
6394 
6395 private:
6396  const VmaAllocator m_hAllocator;
6397  // Null if not from custom pool.
6398  const VmaPool m_hCustomPool;
6399  // Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6400  VmaBlockVector* const m_pBlockVector;
6401  const uint32_t m_CurrFrameIndex;
6402  const uint32_t m_AlgorithmFlags;
6403  // Owner of this object.
6404  VmaDefragmentationAlgorithm* m_pAlgorithm;
6405 
6406  struct AllocInfo
6407  {
6408  VmaAllocation hAlloc;
6409  VkBool32* pChanged;
6410  };
6411  // Used between constructor and Begin.
6412  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6413  bool m_AllAllocations;
6414 };
6415 
6416 struct VmaDefragmentationContext_T
6417 {
6418 private:
6419  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6420 public:
6421  VmaDefragmentationContext_T(
6422  VmaAllocator hAllocator,
6423  uint32_t currFrameIndex,
6424  uint32_t flags,
6425  VmaDefragmentationStats* pStats);
6426  ~VmaDefragmentationContext_T();
6427 
6428  void AddPools(uint32_t poolCount, VmaPool* pPools);
6429  void AddAllocations(
6430  uint32_t allocationCount,
6431  VmaAllocation* pAllocations,
6432  VkBool32* pAllocationsChanged);
6433 
6434  /*
6435  Returns:
6436  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6437  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6438  - Negative value if error occurred and object can be destroyed immediately.
6439  */
6440  VkResult Defragment(
6441  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6442  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6443  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6444 
6445 private:
6446  const VmaAllocator m_hAllocator;
6447  const uint32_t m_CurrFrameIndex;
6448  const uint32_t m_Flags;
6449  VmaDefragmentationStats* const m_pStats;
6450  // Owner of these objects.
6451  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6452  // Owner of these objects.
6453  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6454 };
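// A minimal calling sketch for the contract documented above (a hypothetical helper, not
// part of the library). Per the documentation of VmaDefragmentationInfo2, VK_WHOLE_SIZE and
// UINT32_MAX are assumed here to mean "no limit" for the respective move budgets.
static VkResult VmaExampleRunDefragmentation(
    VmaDefragmentationContext ctx,
    VkCommandBuffer commandBuffer,
    VmaDefragmentationStats* pStats)
{
    const VkResult res = ctx->Defragment(
        VK_WHOLE_SIZE, UINT32_MAX, // CPU-side byte/allocation budgets: unlimited.
        VK_WHOLE_SIZE, UINT32_MAX, // GPU-side byte/allocation budgets: unlimited.
        commandBuffer, pStats);
    // VK_SUCCESS or a negative error: ctx may be destroyed immediately.
    // VK_NOT_READY: ctx must stay alive until vmaDefragmentationEnd() is called.
    return res;
}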
6455 
6456 #if VMA_RECORDING_ENABLED
6457 
6458 class VmaRecorder
6459 {
6460 public:
6461  VmaRecorder();
6462  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6463  void WriteConfiguration(
6464  const VkPhysicalDeviceProperties& devProps,
6465  const VkPhysicalDeviceMemoryProperties& memProps,
6466  bool dedicatedAllocationExtensionEnabled);
6467  ~VmaRecorder();
6468 
6469  void RecordCreateAllocator(uint32_t frameIndex);
6470  void RecordDestroyAllocator(uint32_t frameIndex);
6471  void RecordCreatePool(uint32_t frameIndex,
6472  const VmaPoolCreateInfo& createInfo,
6473  VmaPool pool);
6474  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6475  void RecordAllocateMemory(uint32_t frameIndex,
6476  const VkMemoryRequirements& vkMemReq,
6477  const VmaAllocationCreateInfo& createInfo,
6478  VmaAllocation allocation);
6479  void RecordAllocateMemoryPages(uint32_t frameIndex,
6480  const VkMemoryRequirements& vkMemReq,
6481  const VmaAllocationCreateInfo& createInfo,
6482  uint64_t allocationCount,
6483  const VmaAllocation* pAllocations);
6484  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6485  const VkMemoryRequirements& vkMemReq,
6486  bool requiresDedicatedAllocation,
6487  bool prefersDedicatedAllocation,
6488  const VmaAllocationCreateInfo& createInfo,
6489  VmaAllocation allocation);
6490  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6491  const VkMemoryRequirements& vkMemReq,
6492  bool requiresDedicatedAllocation,
6493  bool prefersDedicatedAllocation,
6494  const VmaAllocationCreateInfo& createInfo,
6495  VmaAllocation allocation);
6496  void RecordFreeMemory(uint32_t frameIndex,
6497  VmaAllocation allocation);
6498  void RecordFreeMemoryPages(uint32_t frameIndex,
6499  uint64_t allocationCount,
6500  const VmaAllocation* pAllocations);
6501  void RecordResizeAllocation(
6502  uint32_t frameIndex,
6503  VmaAllocation allocation,
6504  VkDeviceSize newSize);
6505  void RecordSetAllocationUserData(uint32_t frameIndex,
6506  VmaAllocation allocation,
6507  const void* pUserData);
6508  void RecordCreateLostAllocation(uint32_t frameIndex,
6509  VmaAllocation allocation);
6510  void RecordMapMemory(uint32_t frameIndex,
6511  VmaAllocation allocation);
6512  void RecordUnmapMemory(uint32_t frameIndex,
6513  VmaAllocation allocation);
6514  void RecordFlushAllocation(uint32_t frameIndex,
6515  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6516  void RecordInvalidateAllocation(uint32_t frameIndex,
6517  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6518  void RecordCreateBuffer(uint32_t frameIndex,
6519  const VkBufferCreateInfo& bufCreateInfo,
6520  const VmaAllocationCreateInfo& allocCreateInfo,
6521  VmaAllocation allocation);
6522  void RecordCreateImage(uint32_t frameIndex,
6523  const VkImageCreateInfo& imageCreateInfo,
6524  const VmaAllocationCreateInfo& allocCreateInfo,
6525  VmaAllocation allocation);
6526  void RecordDestroyBuffer(uint32_t frameIndex,
6527  VmaAllocation allocation);
6528  void RecordDestroyImage(uint32_t frameIndex,
6529  VmaAllocation allocation);
6530  void RecordTouchAllocation(uint32_t frameIndex,
6531  VmaAllocation allocation);
6532  void RecordGetAllocationInfo(uint32_t frameIndex,
6533  VmaAllocation allocation);
6534  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6535  VmaPool pool);
6536  void RecordDefragmentationBegin(uint32_t frameIndex,
6537  const VmaDefragmentationInfo2& info,
6538  VmaDefragmentationContext ctx);
6539  void RecordDefragmentationEnd(uint32_t frameIndex,
6540  VmaDefragmentationContext ctx);
6541 
6542 private:
6543  struct CallParams
6544  {
6545  uint32_t threadId;
6546  double time;
6547  };
6548 
6549  class UserDataString
6550  {
6551  public:
6552  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6553  const char* GetString() const { return m_Str; }
6554 
6555  private:
6556  char m_PtrStr[17];
6557  const char* m_Str;
6558  };
6559 
6560  bool m_UseMutex;
6561  VmaRecordFlags m_Flags;
6562  FILE* m_File;
6563  VMA_MUTEX m_FileMutex;
6564  int64_t m_Freq;
6565  int64_t m_StartCounter;
6566 
6567  void GetBasicParams(CallParams& outParams);
6568 
6569  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6570  template<typename T>
6571  void PrintPointerList(uint64_t count, const T* pItems)
6572  {
6573  if(count)
6574  {
6575  fprintf(m_File, "%p", pItems[0]);
6576  for(uint64_t i = 1; i < count; ++i)
6577  {
6578  fprintf(m_File, " %p", pItems[i]);
6579  }
6580  }
6581  }
6582 
6583  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6584  void Flush();
6585 };
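// Each Record* method above appends one line describing the call to m_File. GetBasicParams()
// stamps it with the calling thread id and a time derived from m_StartCounter and m_Freq,
// and m_FileMutex (honored when m_UseMutex is set) serializes concurrent recording.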
6586 
6587 #endif // #if VMA_RECORDING_ENABLED
6588 
6589 /*
6590 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6591 */
6592 class VmaAllocationObjectAllocator
6593 {
6594  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6595 public:
6596  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6597 
6598  VmaAllocation Allocate();
6599  void Free(VmaAllocation hAlloc);
6600 
6601 private:
6602  VMA_MUTEX m_Mutex;
6603  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6604 };
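// Allocate() and Free() are defined later in this file; conceptually they are just a
// mutex-guarded pass-through to the pool allocator (a sketch, assuming the Alloc()/Free()
// interface of VmaPoolAllocator used elsewhere in this header):
//
//     VmaAllocation VmaAllocationObjectAllocator::Allocate()
//     {
//         VmaMutexLock mutexLock(m_Mutex);
//         return m_Allocator.Alloc();
//     }
//
//     void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
//     {
//         VmaMutexLock mutexLock(m_Mutex);
//         m_Allocator.Free(hAlloc);
//     }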
6605 
6606 // Main allocator object.
6607 struct VmaAllocator_T
6608 {
6609  VMA_CLASS_NO_COPY(VmaAllocator_T)
6610 public:
6611  bool m_UseMutex;
6612  bool m_UseKhrDedicatedAllocation;
6613  VkDevice m_hDevice;
6614  bool m_AllocationCallbacksSpecified;
6615  VkAllocationCallbacks m_AllocationCallbacks;
6616  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6617  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6618 
6619  // Remaining number of bytes that may still be allocated from each heap under its size limit, or VK_WHOLE_SIZE if that heap has no limit.
6620  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6621  VMA_MUTEX m_HeapSizeLimitMutex;
6622 
6623  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6624  VkPhysicalDeviceMemoryProperties m_MemProps;
6625 
6626  // Default pools.
6627  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6628 
6629  // Each vector is sorted by memory (handle value).
6630  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6631  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6632  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6633 
6634  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6635  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6636  ~VmaAllocator_T();
6637 
6638  const VkAllocationCallbacks* GetAllocationCallbacks() const
6639  {
6640  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6641  }
6642  const VmaVulkanFunctions& GetVulkanFunctions() const
6643  {
6644  return m_VulkanFunctions;
6645  }
6646 
6647  VkDeviceSize GetBufferImageGranularity() const
6648  {
6649  return VMA_MAX(
6650  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6651  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6652  }
6653 
6654  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6655  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6656 
6657  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6658  {
6659  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6660  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6661  }
6662  // True when the specified memory type is HOST_VISIBLE but not HOST_COHERENT.
6663  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6664  {
6665  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6666  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6667  }
6668  // Minimum alignment for all allocations in the specified memory type.
6669  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6670  {
6671  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6672  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6673  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6674  }
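 // Example: with nonCoherentAtomSize = 64 on a memory type that is HOST_VISIBLE but not
 // HOST_COHERENT, every allocation gets at least 64-byte alignment, so flush/invalidate
 // ranges rounded outward to the atom size never touch a neighboring allocation.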
6675 
6676  bool IsIntegratedGpu() const
6677  {
6678  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6679  }
6680 
6681 #if VMA_RECORDING_ENABLED
6682  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6683 #endif
6684 
6685  void GetBufferMemoryRequirements(
6686  VkBuffer hBuffer,
6687  VkMemoryRequirements& memReq,
6688  bool& requiresDedicatedAllocation,
6689  bool& prefersDedicatedAllocation) const;
6690  void GetImageMemoryRequirements(
6691  VkImage hImage,
6692  VkMemoryRequirements& memReq,
6693  bool& requiresDedicatedAllocation,
6694  bool& prefersDedicatedAllocation) const;
6695 
6696  // Main allocation function.
6697  VkResult AllocateMemory(
6698  const VkMemoryRequirements& vkMemReq,
6699  bool requiresDedicatedAllocation,
6700  bool prefersDedicatedAllocation,
6701  VkBuffer dedicatedBuffer,
6702  VkImage dedicatedImage,
6703  const VmaAllocationCreateInfo& createInfo,
6704  VmaSuballocationType suballocType,
6705  size_t allocationCount,
6706  VmaAllocation* pAllocations);
6707 
6708  // Main deallocation function.
6709  void FreeMemory(
6710  size_t allocationCount,
6711  const VmaAllocation* pAllocations);
6712 
6713  VkResult ResizeAllocation(
6714  const VmaAllocation alloc,
6715  VkDeviceSize newSize);
6716 
6717  void CalculateStats(VmaStats* pStats);
6718 
6719 #if VMA_STATS_STRING_ENABLED
6720  void PrintDetailedMap(class VmaJsonWriter& json);
6721 #endif
6722 
6723  VkResult DefragmentationBegin(
6724  const VmaDefragmentationInfo2& info,
6725  VmaDefragmentationStats* pStats,
6726  VmaDefragmentationContext* pContext);
6727  VkResult DefragmentationEnd(
6728  VmaDefragmentationContext context);
6729 
6730  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6731  bool TouchAllocation(VmaAllocation hAllocation);
6732 
6733  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6734  void DestroyPool(VmaPool pool);
6735  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6736 
6737  void SetCurrentFrameIndex(uint32_t frameIndex);
6738  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6739 
6740  void MakePoolAllocationsLost(
6741  VmaPool hPool,
6742  size_t* pLostAllocationCount);
6743  VkResult CheckPoolCorruption(VmaPool hPool);
6744  VkResult CheckCorruption(uint32_t memoryTypeBits);
6745 
6746  void CreateLostAllocation(VmaAllocation* pAllocation);
6747 
6748  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6749  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6750 
6751  VkResult Map(VmaAllocation hAllocation, void** ppData);
6752  void Unmap(VmaAllocation hAllocation);
6753 
6754  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6755  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6756 
6757  void FlushOrInvalidateAllocation(
6758  VmaAllocation hAllocation,
6759  VkDeviceSize offset, VkDeviceSize size,
6760  VMA_CACHE_OPERATION op);
6761 
6762  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6763 
6764 private:
6765  VkDeviceSize m_PreferredLargeHeapBlockSize;
6766 
6767  VkPhysicalDevice m_PhysicalDevice;
6768  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6769 
6770  VMA_RW_MUTEX m_PoolsMutex;
6771  // Protected by m_PoolsMutex. Sorted by pointer value.
6772  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6773  uint32_t m_NextPoolId;
6774 
6775  VmaVulkanFunctions m_VulkanFunctions;
6776 
6777 #if VMA_RECORDING_ENABLED
6778  VmaRecorder* m_pRecorder;
6779 #endif
6780 
6781  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6782 
6783  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6784 
6785  VkResult AllocateMemoryOfType(
6786  VkDeviceSize size,
6787  VkDeviceSize alignment,
6788  bool dedicatedAllocation,
6789  VkBuffer dedicatedBuffer,
6790  VkImage dedicatedImage,
6791  const VmaAllocationCreateInfo& createInfo,
6792  uint32_t memTypeIndex,
6793  VmaSuballocationType suballocType,
6794  size_t allocationCount,
6795  VmaAllocation* pAllocations);
6796 
6797  // Helper function only to be used inside AllocateDedicatedMemory.
6798  VkResult AllocateDedicatedMemoryPage(
6799  VkDeviceSize size,
6800  VmaSuballocationType suballocType,
6801  uint32_t memTypeIndex,
6802  const VkMemoryAllocateInfo& allocInfo,
6803  bool map,
6804  bool isUserDataString,
6805  void* pUserData,
6806  VmaAllocation* pAllocation);
6807 
6808  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6809  VkResult AllocateDedicatedMemory(
6810  VkDeviceSize size,
6811  VmaSuballocationType suballocType,
6812  uint32_t memTypeIndex,
6813  bool map,
6814  bool isUserDataString,
6815  void* pUserData,
6816  VkBuffer dedicatedBuffer,
6817  VkImage dedicatedImage,
6818  size_t allocationCount,
6819  VmaAllocation* pAllocations);
6820 
6821  // Frees the VkDeviceMemory of a dedicated allocation and unregisters it from m_pDedicatedAllocations.
6822  void FreeDedicatedMemory(VmaAllocation allocation);
6823 };
6824 
6825 ////////////////////////////////////////////////////////////////////////////////
6826 // Memory allocation #2 after VmaAllocator_T definition
6827 
6828 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6829 {
6830  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6831 }
6832 
6833 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6834 {
6835  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6836 }
6837 
6838 template<typename T>
6839 static T* VmaAllocate(VmaAllocator hAllocator)
6840 {
6841  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6842 }
6843 
6844 template<typename T>
6845 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6846 {
6847  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6848 }
6849 
6850 template<typename T>
6851 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6852 {
6853  if(ptr != VMA_NULL)
6854  {
6855  ptr->~T();
6856  VmaFree(hAllocator, ptr);
6857  }
6858 }
6859 
6860 template<typename T>
6861 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6862 {
6863  if(ptr != VMA_NULL)
6864  {
6865  for(size_t i = count; i--; )
6866  ptr[i].~T();
6867  VmaFree(hAllocator, ptr);
6868  }
6869 }
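// Usage sketch, mirroring the user-data string handling in VmaAllocation_T::SetUserData()
// and FreeUserDataString() below (vma_new_array is a macro defined earlier in this file):
//
//     char* const str = vma_new_array(hAllocator, char, strLen + 1);
//     memcpy(str, pSrc, strLen + 1);
//     ...
//     vma_delete_array(hAllocator, str, strLen + 1);
//
// Note that vma_delete_array destroys elements in reverse order before freeing the memory.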
6870 
6871 ////////////////////////////////////////////////////////////////////////////////
6872 // VmaStringBuilder
6873 
6874 #if VMA_STATS_STRING_ENABLED
6875 
6876 class VmaStringBuilder
6877 {
6878 public:
6879  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6880  size_t GetLength() const { return m_Data.size(); }
6881  const char* GetData() const { return m_Data.data(); }
6882 
6883  void Add(char ch) { m_Data.push_back(ch); }
6884  void Add(const char* pStr);
6885  void AddNewLine() { Add('\n'); }
6886  void AddNumber(uint32_t num);
6887  void AddNumber(uint64_t num);
6888  void AddPointer(const void* ptr);
6889 
6890 private:
6891  VmaVector< char, VmaStlAllocator<char> > m_Data;
6892 };
6893 
6894 void VmaStringBuilder::Add(const char* pStr)
6895 {
6896  const size_t strLen = strlen(pStr);
6897  if(strLen > 0)
6898  {
6899  const size_t oldCount = m_Data.size();
6900  m_Data.resize(oldCount + strLen);
6901  memcpy(m_Data.data() + oldCount, pStr, strLen);
6902  }
6903 }
6904 
6905 void VmaStringBuilder::AddNumber(uint32_t num)
6906 {
6907  char buf[11];
6908  VmaUint32ToStr(buf, sizeof(buf), num);
6909  Add(buf);
6910 }
6911 
6912 void VmaStringBuilder::AddNumber(uint64_t num)
6913 {
6914  char buf[21];
6915  VmaUint64ToStr(buf, sizeof(buf), num);
6916  Add(buf);
6917 }
6918 
6919 void VmaStringBuilder::AddPointer(const void* ptr)
6920 {
6921  char buf[21];
6922  VmaPtrToStr(buf, sizeof(buf), ptr);
6923  Add(buf);
6924 }
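// A short usage sketch (hypothetical helper, not part of the library). Note that m_Data
// holds raw characters only: GetData() is not null-terminated, so always consume it
// together with GetLength().
static void VmaStringBuilderExample(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    sb.Add("Memory heaps: ");
    sb.AddNumber(hAllocator->GetMemoryHeapCount());
    sb.AddNewLine();
    // The result is the character range [sb.GetData(), sb.GetData() + sb.GetLength()).
}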
6925 
6926 #endif // #if VMA_STATS_STRING_ENABLED
6927 
6928 ////////////////////////////////////////////////////////////////////////////////
6929 // VmaJsonWriter
6930 
6931 #if VMA_STATS_STRING_ENABLED
6932 
6933 class VmaJsonWriter
6934 {
6935  VMA_CLASS_NO_COPY(VmaJsonWriter)
6936 public:
6937  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6938  ~VmaJsonWriter();
6939 
6940  void BeginObject(bool singleLine = false);
6941  void EndObject();
6942 
6943  void BeginArray(bool singleLine = false);
6944  void EndArray();
6945 
6946  void WriteString(const char* pStr);
6947  void BeginString(const char* pStr = VMA_NULL);
6948  void ContinueString(const char* pStr);
6949  void ContinueString(uint32_t n);
6950  void ContinueString(uint64_t n);
6951  void ContinueString_Pointer(const void* ptr);
6952  void EndString(const char* pStr = VMA_NULL);
6953 
6954  void WriteNumber(uint32_t n);
6955  void WriteNumber(uint64_t n);
6956  void WriteBool(bool b);
6957  void WriteNull();
6958 
6959 private:
6960  static const char* const INDENT;
6961 
6962  enum COLLECTION_TYPE
6963  {
6964  COLLECTION_TYPE_OBJECT,
6965  COLLECTION_TYPE_ARRAY,
6966  };
6967  struct StackItem
6968  {
6969  COLLECTION_TYPE type;
6970  uint32_t valueCount;
6971  bool singleLineMode;
6972  };
6973 
6974  VmaStringBuilder& m_SB;
6975  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6976  bool m_InsideString;
6977 
6978  void BeginValue(bool isString);
6979  void WriteIndent(bool oneLess = false);
6980 };
6981 
6982 const char* const VmaJsonWriter::INDENT = " ";
6983 
6984 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6985  m_SB(sb),
6986  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6987  m_InsideString(false)
6988 {
6989 }
6990 
6991 VmaJsonWriter::~VmaJsonWriter()
6992 {
6993  VMA_ASSERT(!m_InsideString);
6994  VMA_ASSERT(m_Stack.empty());
6995 }
6996 
6997 void VmaJsonWriter::BeginObject(bool singleLine)
6998 {
6999  VMA_ASSERT(!m_InsideString);
7000 
7001  BeginValue(false);
7002  m_SB.Add('{');
7003 
7004  StackItem item;
7005  item.type = COLLECTION_TYPE_OBJECT;
7006  item.valueCount = 0;
7007  item.singleLineMode = singleLine;
7008  m_Stack.push_back(item);
7009 }
7010 
7011 void VmaJsonWriter::EndObject()
7012 {
7013  VMA_ASSERT(!m_InsideString);
7014 
7015  WriteIndent(true);
7016  m_SB.Add('}');
7017 
7018  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7019  m_Stack.pop_back();
7020 }
7021 
7022 void VmaJsonWriter::BeginArray(bool singleLine)
7023 {
7024  VMA_ASSERT(!m_InsideString);
7025 
7026  BeginValue(false);
7027  m_SB.Add('[');
7028 
7029  StackItem item;
7030  item.type = COLLECTION_TYPE_ARRAY;
7031  item.valueCount = 0;
7032  item.singleLineMode = singleLine;
7033  m_Stack.push_back(item);
7034 }
7035 
7036 void VmaJsonWriter::EndArray()
7037 {
7038  VMA_ASSERT(!m_InsideString);
7039 
7040  WriteIndent(true);
7041  m_SB.Add(']');
7042 
7043  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7044  m_Stack.pop_back();
7045 }
7046 
7047 void VmaJsonWriter::WriteString(const char* pStr)
7048 {
7049  BeginString(pStr);
7050  EndString();
7051 }
7052 
7053 void VmaJsonWriter::BeginString(const char* pStr)
7054 {
7055  VMA_ASSERT(!m_InsideString);
7056 
7057  BeginValue(true);
7058  m_SB.Add('"');
7059  m_InsideString = true;
7060  if(pStr != VMA_NULL && pStr[0] != '\0')
7061  {
7062  ContinueString(pStr);
7063  }
7064 }
7065 
7066 void VmaJsonWriter::ContinueString(const char* pStr)
7067 {
7068  VMA_ASSERT(m_InsideString);
7069 
7070  const size_t strLen = strlen(pStr);
7071  for(size_t i = 0; i < strLen; ++i)
7072  {
7073  char ch = pStr[i];
7074  if(ch == '\\')
7075  {
7076  m_SB.Add("\\\\");
7077  }
7078  else if(ch == '"')
7079  {
7080  m_SB.Add("\\\"");
7081  }
7082  else if(ch >= 32)
7083  {
7084  m_SB.Add(ch);
7085  }
7086  else switch(ch)
7087  {
7088  case '\b':
7089  m_SB.Add("\\b");
7090  break;
7091  case '\f':
7092  m_SB.Add("\\f");
7093  break;
7094  case '\n':
7095  m_SB.Add("\\n");
7096  break;
7097  case '\r':
7098  m_SB.Add("\\r");
7099  break;
7100  case '\t':
7101  m_SB.Add("\\t");
7102  break;
7103  default:
7104  VMA_ASSERT(0 && "Character not currently supported.");
7105  break;
7106  }
7107  }
7108 }
7109 
7110 void VmaJsonWriter::ContinueString(uint32_t n)
7111 {
7112  VMA_ASSERT(m_InsideString);
7113  m_SB.AddNumber(n);
7114 }
7115 
7116 void VmaJsonWriter::ContinueString(uint64_t n)
7117 {
7118  VMA_ASSERT(m_InsideString);
7119  m_SB.AddNumber(n);
7120 }
7121 
7122 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7123 {
7124  VMA_ASSERT(m_InsideString);
7125  m_SB.AddPointer(ptr);
7126 }
7127 
7128 void VmaJsonWriter::EndString(const char* pStr)
7129 {
7130  VMA_ASSERT(m_InsideString);
7131  if(pStr != VMA_NULL && pStr[0] != '\0')
7132  {
7133  ContinueString(pStr);
7134  }
7135  m_SB.Add('"');
7136  m_InsideString = false;
7137 }
7138 
7139 void VmaJsonWriter::WriteNumber(uint32_t n)
7140 {
7141  VMA_ASSERT(!m_InsideString);
7142  BeginValue(false);
7143  m_SB.AddNumber(n);
7144 }
7145 
7146 void VmaJsonWriter::WriteNumber(uint64_t n)
7147 {
7148  VMA_ASSERT(!m_InsideString);
7149  BeginValue(false);
7150  m_SB.AddNumber(n);
7151 }
7152 
7153 void VmaJsonWriter::WriteBool(bool b)
7154 {
7155  VMA_ASSERT(!m_InsideString);
7156  BeginValue(false);
7157  m_SB.Add(b ? "true" : "false");
7158 }
7159 
7160 void VmaJsonWriter::WriteNull()
7161 {
7162  VMA_ASSERT(!m_InsideString);
7163  BeginValue(false);
7164  m_SB.Add("null");
7165 }
7166 
7167 void VmaJsonWriter::BeginValue(bool isString)
7168 {
7169  if(!m_Stack.empty())
7170  {
7171  StackItem& currItem = m_Stack.back();
7172  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7173  currItem.valueCount % 2 == 0)
7174  {
7175  VMA_ASSERT(isString);
7176  }
7177 
7178  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7179  currItem.valueCount % 2 != 0)
7180  {
7181  m_SB.Add(": ");
7182  }
7183  else if(currItem.valueCount > 0)
7184  {
7185  m_SB.Add(", ");
7186  WriteIndent();
7187  }
7188  else
7189  {
7190  WriteIndent();
7191  }
7192  ++currItem.valueCount;
7193  }
7194 }
7195 
7196 void VmaJsonWriter::WriteIndent(bool oneLess)
7197 {
7198  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7199  {
7200  m_SB.AddNewLine();
7201 
7202  size_t count = m_Stack.size();
7203  if(count > 0 && oneLess)
7204  {
7205  --count;
7206  }
7207  for(size_t i = 0; i < count; ++i)
7208  {
7209  m_SB.Add(INDENT);
7210  }
7211  }
7212 }
7213 
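// A usage sketch (hypothetical helper): inside an object, string keys and values must
// alternate; BeginValue() above asserts that every even-indexed element is a string key,
// and the destructor asserts that all objects and arrays were closed.
static void VmaJsonWriterExample(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
{
    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("Size");         // Key.
    json.WriteNumber((uint64_t)1024); // Value.
    json.WriteString("Mapped");       // Key.
    json.WriteBool(false);            // Value.
    json.EndObject();
}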
7214 #endif // #if VMA_STATS_STRING_ENABLED
7215 
7216 ////////////////////////////////////////////////////////////////////////////////
7217 
7218 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7219 {
7220  if(IsUserDataString())
7221  {
7222  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7223 
7224  FreeUserDataString(hAllocator);
7225 
7226  if(pUserData != VMA_NULL)
7227  {
7228  const char* const newStrSrc = (char*)pUserData;
7229  const size_t newStrLen = strlen(newStrSrc);
7230  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7231  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7232  m_pUserData = newStrDst;
7233  }
7234  }
7235  else
7236  {
7237  m_pUserData = pUserData;
7238  }
7239 }
7240 
7241 void VmaAllocation_T::ChangeBlockAllocation(
7242  VmaAllocator hAllocator,
7243  VmaDeviceMemoryBlock* block,
7244  VkDeviceSize offset)
7245 {
7246  VMA_ASSERT(block != VMA_NULL);
7247  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7248 
7249  // Move mapping reference counter from old block to new block.
7250  if(block != m_BlockAllocation.m_Block)
7251  {
7252  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7253  if(IsPersistentMap())
7254  ++mapRefCount;
7255  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7256  block->Map(hAllocator, mapRefCount, VMA_NULL);
7257  }
7258 
7259  m_BlockAllocation.m_Block = block;
7260  m_BlockAllocation.m_Offset = offset;
7261 }
7262 
7263 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7264 {
7265  VMA_ASSERT(newSize > 0);
7266  m_Size = newSize;
7267 }
7268 
7269 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7270 {
7271  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7272  m_BlockAllocation.m_Offset = newOffset;
7273 }
7274 
7275 VkDeviceSize VmaAllocation_T::GetOffset() const
7276 {
7277  switch(m_Type)
7278  {
7279  case ALLOCATION_TYPE_BLOCK:
7280  return m_BlockAllocation.m_Offset;
7281  case ALLOCATION_TYPE_DEDICATED:
7282  return 0;
7283  default:
7284  VMA_ASSERT(0);
7285  return 0;
7286  }
7287 }
7288 
7289 VkDeviceMemory VmaAllocation_T::GetMemory() const
7290 {
7291  switch(m_Type)
7292  {
7293  case ALLOCATION_TYPE_BLOCK:
7294  return m_BlockAllocation.m_Block->GetDeviceMemory();
7295  case ALLOCATION_TYPE_DEDICATED:
7296  return m_DedicatedAllocation.m_hMemory;
7297  default:
7298  VMA_ASSERT(0);
7299  return VK_NULL_HANDLE;
7300  }
7301 }
7302 
7303 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7304 {
7305  switch(m_Type)
7306  {
7307  case ALLOCATION_TYPE_BLOCK:
7308  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7309  case ALLOCATION_TYPE_DEDICATED:
7310  return m_DedicatedAllocation.m_MemoryTypeIndex;
7311  default:
7312  VMA_ASSERT(0);
7313  return UINT32_MAX;
7314  }
7315 }
7316 
7317 void* VmaAllocation_T::GetMappedData() const
7318 {
7319  switch(m_Type)
7320  {
7321  case ALLOCATION_TYPE_BLOCK:
7322  if(m_MapCount != 0)
7323  {
7324  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7325  VMA_ASSERT(pBlockData != VMA_NULL);
7326  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7327  }
7328  else
7329  {
7330  return VMA_NULL;
7331  }
7332  break;
7333  case ALLOCATION_TYPE_DEDICATED:
7334  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7335  return m_DedicatedAllocation.m_pMappedData;
7336  default:
7337  VMA_ASSERT(0);
7338  return VMA_NULL;
7339  }
7340 }
7341 
7342 bool VmaAllocation_T::CanBecomeLost() const
7343 {
7344  switch(m_Type)
7345  {
7346  case ALLOCATION_TYPE_BLOCK:
7347  return m_BlockAllocation.m_CanBecomeLost;
7348  case ALLOCATION_TYPE_DEDICATED:
7349  return false;
7350  default:
7351  VMA_ASSERT(0);
7352  return false;
7353  }
7354 }
7355 
7356 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7357 {
7358  VMA_ASSERT(CanBecomeLost());
7359 
7360  /*
7361  Warning: This is a carefully designed algorithm.
7362  Do not modify unless you really know what you're doing :)
7363  */
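 // The loop exists because CompareExchangeLastUseFrameIndex() is a compare-and-swap that,
 // on failure (another thread touched this allocation concurrently), updates
 // localLastUseFrameIndex with the freshly observed value, so both tests in the loop body
 // are re-evaluated against it.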
7364  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7365  for(;;)
7366  {
7367  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7368  {
7369  VMA_ASSERT(0);
7370  return false;
7371  }
7372  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7373  {
7374  return false;
7375  }
7376  else // Last use time earlier than current time.
7377  {
7378  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7379  {
7380  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7381  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7382  return true;
7383  }
7384  }
7385  }
7386 }
7387 
7388 #if VMA_STATS_STRING_ENABLED
7389 
7390 // Names correspond to the values of enum VmaSuballocationType.
7391 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7392  "FREE",
7393  "UNKNOWN",
7394  "BUFFER",
7395  "IMAGE_UNKNOWN",
7396  "IMAGE_LINEAR",
7397  "IMAGE_OPTIMAL",
7398 };
7399 
7400 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7401 {
7402  json.WriteString("Type");
7403  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7404 
7405  json.WriteString("Size");
7406  json.WriteNumber(m_Size);
7407 
7408  if(m_pUserData != VMA_NULL)
7409  {
7410  json.WriteString("UserData");
7411  if(IsUserDataString())
7412  {
7413  json.WriteString((const char*)m_pUserData);
7414  }
7415  else
7416  {
7417  json.BeginString();
7418  json.ContinueString_Pointer(m_pUserData);
7419  json.EndString();
7420  }
7421  }
7422 
7423  json.WriteString("CreationFrameIndex");
7424  json.WriteNumber(m_CreationFrameIndex);
7425 
7426  json.WriteString("LastUseFrameIndex");
7427  json.WriteNumber(GetLastUseFrameIndex());
7428 
7429  if(m_BufferImageUsage != 0)
7430  {
7431  json.WriteString("Usage");
7432  json.WriteNumber(m_BufferImageUsage);
7433  }
7434 }
7435 
7436 #endif
7437 
7438 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7439 {
7440  VMA_ASSERT(IsUserDataString());
7441  if(m_pUserData != VMA_NULL)
7442  {
7443  char* const oldStr = (char*)m_pUserData;
7444  const size_t oldStrLen = strlen(oldStr);
7445  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7446  m_pUserData = VMA_NULL;
7447  }
7448 }
7449 
7450 void VmaAllocation_T::BlockAllocMap()
7451 {
7452  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7453 
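 // The low 7 bits of m_MapCount hold the map reference count; the remaining top bit
 // (MAP_COUNT_FLAG_PERSISTENT_MAP) marks allocations created persistently mapped, hence
 // the 0x7F ceiling checked below.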
7454  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7455  {
7456  ++m_MapCount;
7457  }
7458  else
7459  {
7460  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7461  }
7462 }
7463 
7464 void VmaAllocation_T::BlockAllocUnmap()
7465 {
7466  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7467 
7468  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7469  {
7470  --m_MapCount;
7471  }
7472  else
7473  {
7474  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7475  }
7476 }
7477 
7478 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7479 {
7480  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7481 
7482  if(m_MapCount != 0)
7483  {
7484  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7485  {
7486  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7487  *ppData = m_DedicatedAllocation.m_pMappedData;
7488  ++m_MapCount;
7489  return VK_SUCCESS;
7490  }
7491  else
7492  {
7493  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7494  return VK_ERROR_MEMORY_MAP_FAILED;
7495  }
7496  }
7497  else
7498  {
7499  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7500  hAllocator->m_hDevice,
7501  m_DedicatedAllocation.m_hMemory,
7502  0, // offset
7503  VK_WHOLE_SIZE,
7504  0, // flags
7505  ppData);
7506  if(result == VK_SUCCESS)
7507  {
7508  m_DedicatedAllocation.m_pMappedData = *ppData;
7509  m_MapCount = 1;
7510  }
7511  return result;
7512  }
7513 }
7514 
7515 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7516 {
7517  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7518 
7519  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7520  {
7521  --m_MapCount;
7522  if(m_MapCount == 0)
7523  {
7524  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7525  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7526  hAllocator->m_hDevice,
7527  m_DedicatedAllocation.m_hMemory);
7528  }
7529  }
7530  else
7531  {
7532  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7533  }
7534 }
7535 
7536 #if VMA_STATS_STRING_ENABLED
7537 
7538 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7539 {
7540  json.BeginObject();
7541 
7542  json.WriteString("Blocks");
7543  json.WriteNumber(stat.blockCount);
7544 
7545  json.WriteString("Allocations");
7546  json.WriteNumber(stat.allocationCount);
7547 
7548  json.WriteString("UnusedRanges");
7549  json.WriteNumber(stat.unusedRangeCount);
7550 
7551  json.WriteString("UsedBytes");
7552  json.WriteNumber(stat.usedBytes);
7553 
7554  json.WriteString("UnusedBytes");
7555  json.WriteNumber(stat.unusedBytes);
7556 
7557  if(stat.allocationCount > 1)
7558  {
7559  json.WriteString("AllocationSize");
7560  json.BeginObject(true);
7561  json.WriteString("Min");
7562  json.WriteNumber(stat.allocationSizeMin);
7563  json.WriteString("Avg");
7564  json.WriteNumber(stat.allocationSizeAvg);
7565  json.WriteString("Max");
7566  json.WriteNumber(stat.allocationSizeMax);
7567  json.EndObject();
7568  }
7569 
7570  if(stat.unusedRangeCount > 1)
7571  {
7572  json.WriteString("UnusedRangeSize");
7573  json.BeginObject(true);
7574  json.WriteString("Min");
7575  json.WriteNumber(stat.unusedRangeSizeMin);
7576  json.WriteString("Avg");
7577  json.WriteNumber(stat.unusedRangeSizeAvg);
7578  json.WriteString("Max");
7579  json.WriteNumber(stat.unusedRangeSizeMax);
7580  json.EndObject();
7581  }
7582 
7583  json.EndObject();
7584 }
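// Illustrative shape of the JSON emitted above (values are made up):
//
//     { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
//       "UsedBytes": 1024, "UnusedBytes": 512,
//       "AllocationSize": { "Min": 256, "Avg": 512, "Max": 768 } }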
7585 
7586 #endif // #if VMA_STATS_STRING_ENABLED
7587 
7588 struct VmaSuballocationItemSizeLess
7589 {
7590  bool operator()(
7591  const VmaSuballocationList::iterator lhs,
7592  const VmaSuballocationList::iterator rhs) const
7593  {
7594  return lhs->size < rhs->size;
7595  }
7596  bool operator()(
7597  const VmaSuballocationList::iterator lhs,
7598  VkDeviceSize rhsSize) const
7599  {
7600  return lhs->size < rhsSize;
7601  }
7602 };
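// The second operator() overload lets VmaBinaryFindFirstNotLess() compare the stored list
// iterators directly against a plain VkDeviceSize key, as done in
// VmaBlockMetadata_Generic::CreateAllocationRequest() below.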
7603 
7604 
7605 ////////////////////////////////////////////////////////////////////////////////
7606 // class VmaBlockMetadata
7607 
7608 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7609  m_Size(0),
7610  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7611 {
7612 }
7613 
7614 #if VMA_STATS_STRING_ENABLED
7615 
7616 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7617  VkDeviceSize unusedBytes,
7618  size_t allocationCount,
7619  size_t unusedRangeCount) const
7620 {
7621  json.BeginObject();
7622 
7623  json.WriteString("TotalBytes");
7624  json.WriteNumber(GetSize());
7625 
7626  json.WriteString("UnusedBytes");
7627  json.WriteNumber(unusedBytes);
7628 
7629  json.WriteString("Allocations");
7630  json.WriteNumber((uint64_t)allocationCount);
7631 
7632  json.WriteString("UnusedRanges");
7633  json.WriteNumber((uint64_t)unusedRangeCount);
7634 
7635  json.WriteString("Suballocations");
7636  json.BeginArray();
7637 }
7638 
7639 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7640  VkDeviceSize offset,
7641  VmaAllocation hAllocation) const
7642 {
7643  json.BeginObject(true);
7644 
7645  json.WriteString("Offset");
7646  json.WriteNumber(offset);
7647 
7648  hAllocation->PrintParameters(json);
7649 
7650  json.EndObject();
7651 }
7652 
7653 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7654  VkDeviceSize offset,
7655  VkDeviceSize size) const
7656 {
7657  json.BeginObject(true);
7658 
7659  json.WriteString("Offset");
7660  json.WriteNumber(offset);
7661 
7662  json.WriteString("Type");
7663  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7664 
7665  json.WriteString("Size");
7666  json.WriteNumber(size);
7667 
7668  json.EndObject();
7669 }
7670 
7671 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7672 {
7673  json.EndArray();
7674  json.EndObject();
7675 }
7676 
7677 #endif // #if VMA_STATS_STRING_ENABLED
7678 
7679 ////////////////////////////////////////////////////////////////////////////////
7680 // class VmaBlockMetadata_Generic
7681 
7682 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7683  VmaBlockMetadata(hAllocator),
7684  m_FreeCount(0),
7685  m_SumFreeSize(0),
7686  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7687  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7688 {
7689 }
7690 
7691 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7692 {
7693 }
7694 
7695 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7696 {
7697  VmaBlockMetadata::Init(size);
7698 
7699  m_FreeCount = 1;
7700  m_SumFreeSize = size;
7701 
7702  VmaSuballocation suballoc = {};
7703  suballoc.offset = 0;
7704  suballoc.size = size;
7705  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7706  suballoc.hAllocation = VK_NULL_HANDLE;
7707 
7708  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7709  m_Suballocations.push_back(suballoc);
7710  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7711  --suballocItem;
7712  m_FreeSuballocationsBySize.push_back(suballocItem);
7713 }
7714 
7715 bool VmaBlockMetadata_Generic::Validate() const
7716 {
7717  VMA_VALIDATE(!m_Suballocations.empty());
7718 
7719  // Expected offset of new suballocation as calculated from previous ones.
7720  VkDeviceSize calculatedOffset = 0;
7721  // Expected number of free suballocations as calculated from traversing their list.
7722  uint32_t calculatedFreeCount = 0;
7723  // Expected sum size of free suballocations as calculated from traversing their list.
7724  VkDeviceSize calculatedSumFreeSize = 0;
7725  // Expected number of free suballocations that should be registered in
7726  // m_FreeSuballocationsBySize calculated from traversing their list.
7727  size_t freeSuballocationsToRegister = 0;
7728  // True if previous visited suballocation was free.
7729  bool prevFree = false;
7730 
7731  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7732  suballocItem != m_Suballocations.cend();
7733  ++suballocItem)
7734  {
7735  const VmaSuballocation& subAlloc = *suballocItem;
7736 
7737  // Actual offset of this suballocation doesn't match expected one.
7738  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7739 
7740  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7741  // Two adjacent free suballocations are invalid. They should be merged.
7742  VMA_VALIDATE(!prevFree || !currFree);
7743 
7744  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7745 
7746  if(currFree)
7747  {
7748  calculatedSumFreeSize += subAlloc.size;
7749  ++calculatedFreeCount;
7750  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7751  {
7752  ++freeSuballocationsToRegister;
7753  }
7754 
7755  // If a margin is required between allocations, every free range must be at least that large.
7756  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7757  }
7758  else
7759  {
7760  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7761  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7762 
7763  // If a margin is required between allocations, the previous suballocation must be free.
7764  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7765  }
7766 
7767  calculatedOffset += subAlloc.size;
7768  prevFree = currFree;
7769  }
7770 
7771  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7772  // match expected one.
7773  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7774 
7775  VkDeviceSize lastSize = 0;
7776  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7777  {
7778  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7779 
7780  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7781  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7782  // They must be sorted by size ascending.
7783  VMA_VALIDATE(suballocItem->size >= lastSize);
7784 
7785  lastSize = suballocItem->size;
7786  }
7787 
7788  // Check if totals match calculated values.
7789  VMA_VALIDATE(ValidateFreeSuballocationList());
7790  VMA_VALIDATE(calculatedOffset == GetSize());
7791  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7792  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7793 
7794  return true;
7795 }
7796 
7797 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7798 {
7799  if(!m_FreeSuballocationsBySize.empty())
7800  {
7801  return m_FreeSuballocationsBySize.back()->size;
7802  }
7803  else
7804  {
7805  return 0;
7806  }
7807 }
7808 
7809 bool VmaBlockMetadata_Generic::IsEmpty() const
7810 {
7811  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7812 }
7813 
7814 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7815 {
7816  outInfo.blockCount = 1;
7817 
7818  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7819  outInfo.allocationCount = rangeCount - m_FreeCount;
7820  outInfo.unusedRangeCount = m_FreeCount;
7821 
7822  outInfo.unusedBytes = m_SumFreeSize;
7823  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7824 
7825  outInfo.allocationSizeMin = UINT64_MAX;
7826  outInfo.allocationSizeMax = 0;
7827  outInfo.unusedRangeSizeMin = UINT64_MAX;
7828  outInfo.unusedRangeSizeMax = 0;
7829 
7830  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7831  suballocItem != m_Suballocations.cend();
7832  ++suballocItem)
7833  {
7834  const VmaSuballocation& suballoc = *suballocItem;
7835  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7836  {
7837  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7838  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7839  }
7840  else
7841  {
7842  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7843  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7844  }
7845  }
7846 }
7847 
7848 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7849 {
7850  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7851 
7852  inoutStats.size += GetSize();
7853  inoutStats.unusedSize += m_SumFreeSize;
7854  inoutStats.allocationCount += rangeCount - m_FreeCount;
7855  inoutStats.unusedRangeCount += m_FreeCount;
7856  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7857 }
7858 
7859 #if VMA_STATS_STRING_ENABLED
7860 
7861 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7862 {
7863  PrintDetailedMap_Begin(json,
7864  m_SumFreeSize, // unusedBytes
7865  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7866  m_FreeCount); // unusedRangeCount
7867 
7868  size_t i = 0;
7869  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7870  suballocItem != m_Suballocations.cend();
7871  ++suballocItem, ++i)
7872  {
7873  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7874  {
7875  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7876  }
7877  else
7878  {
7879  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7880  }
7881  }
7882 
7883  PrintDetailedMap_End(json);
7884 }
7885 
7886 #endif // #if VMA_STATS_STRING_ENABLED
7887 
7888 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7889  uint32_t currentFrameIndex,
7890  uint32_t frameInUseCount,
7891  VkDeviceSize bufferImageGranularity,
7892  VkDeviceSize allocSize,
7893  VkDeviceSize allocAlignment,
7894  bool upperAddress,
7895  VmaSuballocationType allocType,
7896  bool canMakeOtherLost,
7897  uint32_t strategy,
7898  VmaAllocationRequest* pAllocationRequest)
7899 {
7900  VMA_ASSERT(allocSize > 0);
7901  VMA_ASSERT(!upperAddress);
7902  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7903  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7904  VMA_HEAVY_ASSERT(Validate());
7905 
7906  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7907 
7908  // There is not enough total free space in this block to fulfill the request: early return.
7909  if(canMakeOtherLost == false &&
7910  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7911  {
7912  return false;
7913  }
7914 
7915  // New algorithm, efficiently searching freeSuballocationsBySize.
7916  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7917  if(freeSuballocCount > 0)
7918  {
7919  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7920  {
7921  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7922  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7923  m_FreeSuballocationsBySize.data(),
7924  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7925  allocSize + 2 * VMA_DEBUG_MARGIN,
7926  VmaSuballocationItemSizeLess());
7927  size_t index = it - m_FreeSuballocationsBySize.data();
7928  for(; index < freeSuballocCount; ++index)
7929  {
7930  if(CheckAllocation(
7931  currentFrameIndex,
7932  frameInUseCount,
7933  bufferImageGranularity,
7934  allocSize,
7935  allocAlignment,
7936  allocType,
7937  m_FreeSuballocationsBySize[index],
7938  false, // canMakeOtherLost
7939  &pAllocationRequest->offset,
7940  &pAllocationRequest->itemsToMakeLostCount,
7941  &pAllocationRequest->sumFreeSize,
7942  &pAllocationRequest->sumItemSize))
7943  {
7944  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7945  return true;
7946  }
7947  }
7948  }
7949  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7950  {
7951  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7952  it != m_Suballocations.end();
7953  ++it)
7954  {
7955  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7956  currentFrameIndex,
7957  frameInUseCount,
7958  bufferImageGranularity,
7959  allocSize,
7960  allocAlignment,
7961  allocType,
7962  it,
7963  false, // canMakeOtherLost
7964  &pAllocationRequest->offset,
7965  &pAllocationRequest->itemsToMakeLostCount,
7966  &pAllocationRequest->sumFreeSize,
7967  &pAllocationRequest->sumItemSize))
7968  {
7969  pAllocationRequest->item = it;
7970  return true;
7971  }
7972  }
7973  }
7974  else // WORST_FIT, FIRST_FIT
7975  {
7976  // Search starting from biggest suballocations.
7977  for(size_t index = freeSuballocCount; index--; )
7978  {
7979  if(CheckAllocation(
7980  currentFrameIndex,
7981  frameInUseCount,
7982  bufferImageGranularity,
7983  allocSize,
7984  allocAlignment,
7985  allocType,
7986  m_FreeSuballocationsBySize[index],
7987  false, // canMakeOtherLost
7988  &pAllocationRequest->offset,
7989  &pAllocationRequest->itemsToMakeLostCount,
7990  &pAllocationRequest->sumFreeSize,
7991  &pAllocationRequest->sumItemSize))
7992  {
7993  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7994  return true;
7995  }
7996  }
7997  }
7998  }
7999 
8000  if(canMakeOtherLost)
8001  {
8002  // Brute-force algorithm. TODO: Come up with something better.
8003 
8004  bool found = false;
8005  VmaAllocationRequest tmpAllocRequest = {};
8006  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8007  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8008  suballocIt != m_Suballocations.end();
8009  ++suballocIt)
8010  {
8011  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8012  suballocIt->hAllocation->CanBecomeLost())
8013  {
8014  if(CheckAllocation(
8015  currentFrameIndex,
8016  frameInUseCount,
8017  bufferImageGranularity,
8018  allocSize,
8019  allocAlignment,
8020  allocType,
8021  suballocIt,
8022  canMakeOtherLost,
8023  &tmpAllocRequest.offset,
8024  &tmpAllocRequest.itemsToMakeLostCount,
8025  &tmpAllocRequest.sumFreeSize,
8026  &tmpAllocRequest.sumItemSize))
8027  {
8028  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8029  {
8030  *pAllocationRequest = tmpAllocRequest;
8031  pAllocationRequest->item = suballocIt;
8032  break;
8033  }
8034  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8035  {
8036  *pAllocationRequest = tmpAllocRequest;
8037  pAllocationRequest->item = suballocIt;
8038  found = true;
8039  }
8040  }
8041  }
8042  }
8043 
8044  return found;
8045  }
8046 
8047  return false;
8048 }
8049 
8050 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8051  uint32_t currentFrameIndex,
8052  uint32_t frameInUseCount,
8053  VmaAllocationRequest* pAllocationRequest)
8054 {
8055  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8056 
8057  while(pAllocationRequest->itemsToMakeLostCount > 0)
8058  {
8059  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8060  {
8061  ++pAllocationRequest->item;
8062  }
8063  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8064  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8065  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8066  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8067  {
8068  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8069  --pAllocationRequest->itemsToMakeLostCount;
8070  }
8071  else
8072  {
8073  return false;
8074  }
8075  }
8076 
8077  VMA_HEAVY_ASSERT(Validate());
8078  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8079  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8080 
8081  return true;
8082 }
8083 
8084 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8085 {
8086  uint32_t lostAllocationCount = 0;
8087  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8088  it != m_Suballocations.end();
8089  ++it)
8090  {
8091  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8092  it->hAllocation->CanBecomeLost() &&
8093  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8094  {
8095  it = FreeSuballocation(it);
8096  ++lostAllocationCount;
8097  }
8098  }
8099  return lostAllocationCount;
8100 }
8101 
8102 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8103 {
8104  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8105  it != m_Suballocations.end();
8106  ++it)
8107  {
8108  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8109  {
8110  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8111  {
8112  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8113  return VK_ERROR_VALIDATION_FAILED_EXT;
8114  }
8115  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8116  {
8117  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8118  return VK_ERROR_VALIDATION_FAILED_EXT;
8119  }
8120  }
8121  }
8122 
8123  return VK_SUCCESS;
8124 }
8125 
8126 void VmaBlockMetadata_Generic::Alloc(
8127  const VmaAllocationRequest& request,
8128  VmaSuballocationType type,
8129  VkDeviceSize allocSize,
8130  VmaAllocation hAllocation)
8131 {
8132  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8133  VMA_ASSERT(request.item != m_Suballocations.end());
8134  VmaSuballocation& suballoc = *request.item;
8135  // Given suballocation is a free block.
8136  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8137  // Given offset is inside this suballocation.
8138  VMA_ASSERT(request.offset >= suballoc.offset);
8139  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8140  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8141  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8142 
8143  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8144  // it to become used.
8145  UnregisterFreeSuballocation(request.item);
8146 
8147  suballoc.offset = request.offset;
8148  suballoc.size = allocSize;
8149  suballoc.type = type;
8150  suballoc.hAllocation = hAllocation;
8151 
8152  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8153  if(paddingEnd)
8154  {
8155  VmaSuballocation paddingSuballoc = {};
8156  paddingSuballoc.offset = request.offset + allocSize;
8157  paddingSuballoc.size = paddingEnd;
8158  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8159  VmaSuballocationList::iterator next = request.item;
8160  ++next;
8161  const VmaSuballocationList::iterator paddingEndItem =
8162  m_Suballocations.insert(next, paddingSuballoc);
8163  RegisterFreeSuballocation(paddingEndItem);
8164  }
8165 
8166  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8167  if(paddingBegin)
8168  {
8169  VmaSuballocation paddingSuballoc = {};
8170  paddingSuballoc.offset = request.offset - paddingBegin;
8171  paddingSuballoc.size = paddingBegin;
8172  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8173  const VmaSuballocationList::iterator paddingBeginItem =
8174  m_Suballocations.insert(request.item, paddingSuballoc);
8175  RegisterFreeSuballocation(paddingBeginItem);
8176  }
8177 
8178  // Update totals.
8179  m_FreeCount = m_FreeCount - 1;
8180  if(paddingBegin > 0)
8181  {
8182  ++m_FreeCount;
8183  }
8184  if(paddingEnd > 0)
8185  {
8186  ++m_FreeCount;
8187  }
8188  m_SumFreeSize -= allocSize;
8189 }
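// Worked example for Alloc() above: given one free suballocation [0, 1000) and a request
// with offset 16 and allocSize 100, paddingBegin = 16 and paddingEnd = 884, so the list
// becomes free [0, 16), used [16, 116), free [116, 1000); m_FreeCount nets +1 and
// m_SumFreeSize drops by exactly allocSize = 100.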
8190 
8191 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8192 {
8193  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8194  suballocItem != m_Suballocations.end();
8195  ++suballocItem)
8196  {
8197  VmaSuballocation& suballoc = *suballocItem;
8198  if(suballoc.hAllocation == allocation)
8199  {
8200  FreeSuballocation(suballocItem);
8201  VMA_HEAVY_ASSERT(Validate());
8202  return;
8203  }
8204  }
8205  VMA_ASSERT(0 && "Not found!");
8206 }
8207 
8208 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8209 {
8210  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8211  suballocItem != m_Suballocations.end();
8212  ++suballocItem)
8213  {
8214  VmaSuballocation& suballoc = *suballocItem;
8215  if(suballoc.offset == offset)
8216  {
8217  FreeSuballocation(suballocItem);
8218  return;
8219  }
8220  }
8221  VMA_ASSERT(0 && "Not found!");
8222 }
8223 
8224 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8225 {
8226  typedef VmaSuballocationList::iterator iter_type;
8227  for(iter_type suballocItem = m_Suballocations.begin();
8228  suballocItem != m_Suballocations.end();
8229  ++suballocItem)
8230  {
8231  VmaSuballocation& suballoc = *suballocItem;
8232  if(suballoc.hAllocation == alloc)
8233  {
8234  iter_type nextItem = suballocItem;
8235  ++nextItem;
8236 
8237  // Should have been ensured on higher level.
8238  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8239 
8240  // Shrinking.
8241  if(newSize < alloc->GetSize())
8242  {
8243  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8244 
8245  // There is next item.
8246  if(nextItem != m_Suballocations.end())
8247  {
8248  // Next item is free.
8249  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8250  {
8251  // Grow this next item backward.
8252  UnregisterFreeSuballocation(nextItem);
8253  nextItem->offset -= sizeDiff;
8254  nextItem->size += sizeDiff;
8255  RegisterFreeSuballocation(nextItem);
8256  }
8257  // Next item is not free.
8258  else
8259  {
8260  // Create free item after current one.
8261  VmaSuballocation newFreeSuballoc;
8262  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8263  newFreeSuballoc.offset = suballoc.offset + newSize;
8264  newFreeSuballoc.size = sizeDiff;
8265  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8266  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8267  RegisterFreeSuballocation(newFreeSuballocIt);
8268 
8269  ++m_FreeCount;
8270  }
8271  }
8272  // This is the last item.
8273  else
8274  {
8275  // Create free item at the end.
8276  VmaSuballocation newFreeSuballoc;
8277  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8278  newFreeSuballoc.offset = suballoc.offset + newSize;
8279  newFreeSuballoc.size = sizeDiff;
8280  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8281  m_Suballocations.push_back(newFreeSuballoc);
8282 
8283  iter_type newFreeSuballocIt = m_Suballocations.end();
8284  RegisterFreeSuballocation(--newFreeSuballocIt);
8285 
8286  ++m_FreeCount;
8287  }
8288 
8289  suballoc.size = newSize;
8290  m_SumFreeSize += sizeDiff;
8291  }
8292  // Growing.
8293  else
8294  {
8295  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8296 
8297  // There is next item.
8298  if(nextItem != m_Suballocations.end())
8299  {
8300  // Next item is free.
8301  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8302  {
8303  // There is not enough free space, including margin.
8304  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8305  {
8306  return false;
8307  }
8308 
8309  // There is more free space than required.
8310  if(nextItem->size > sizeDiff)
8311  {
8312  // Move and shrink this next item.
8313  UnregisterFreeSuballocation(nextItem);
8314  nextItem->offset += sizeDiff;
8315  nextItem->size -= sizeDiff;
8316  RegisterFreeSuballocation(nextItem);
8317  }
8318  // There is exactly the amount of free space required.
8319  else
8320  {
8321  // Remove this next free item.
8322  UnregisterFreeSuballocation(nextItem);
8323  m_Suballocations.erase(nextItem);
8324  --m_FreeCount;
8325  }
8326  }
8327  // Next item is not free - there is no space to grow.
8328  else
8329  {
8330  return false;
8331  }
8332  }
8333  // This is the last item - there is no space to grow.
8334  else
8335  {
8336  return false;
8337  }
8338 
8339  suballoc.size = newSize;
8340  m_SumFreeSize -= sizeDiff;
8341  }
8342 
8343  // We cannot call Validate() here because the allocation object is updated to its new size outside of this call.
8344  return true;
8345  }
8346  }
8347  VMA_ASSERT(0 && "Not found!");
8348  return false;
8349 }
8350 
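// Invariants checked below for m_FreeSuballocationsBySize: every registered item is
// free, has at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes, and the vector
// is sorted by size in non-decreasing order - the binary searches in
// RegisterFreeSuballocation/UnregisterFreeSuballocation depend on this ordering.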
8351 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8352 {
8353  VkDeviceSize lastSize = 0;
8354  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8355  {
8356  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8357 
8358  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8359  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8360  VMA_VALIDATE(it->size >= lastSize);
8361  lastSize = it->size;
8362  }
8363  return true;
8364 }
8365 
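// CheckAllocation decides whether a request of allocSize/allocAlignment can be placed
// starting at suballocItem. It derives the candidate *pOffset by applying
// VMA_DEBUG_MARGIN, alignment, and bufferImageGranularity adjustments, then verifies
// that the request fits. When canMakeOtherLost is true it may also walk forward over
// allocations that can become lost, accumulating *itemsToMakeLostCount, *pSumFreeSize,
// and *pSumItemSize; otherwise suballocItem itself must be a large enough free item.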
8366 bool VmaBlockMetadata_Generic::CheckAllocation(
8367  uint32_t currentFrameIndex,
8368  uint32_t frameInUseCount,
8369  VkDeviceSize bufferImageGranularity,
8370  VkDeviceSize allocSize,
8371  VkDeviceSize allocAlignment,
8372  VmaSuballocationType allocType,
8373  VmaSuballocationList::const_iterator suballocItem,
8374  bool canMakeOtherLost,
8375  VkDeviceSize* pOffset,
8376  size_t* itemsToMakeLostCount,
8377  VkDeviceSize* pSumFreeSize,
8378  VkDeviceSize* pSumItemSize) const
8379 {
8380  VMA_ASSERT(allocSize > 0);
8381  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8382  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8383  VMA_ASSERT(pOffset != VMA_NULL);
8384 
8385  *itemsToMakeLostCount = 0;
8386  *pSumFreeSize = 0;
8387  *pSumItemSize = 0;
8388 
8389  if(canMakeOtherLost)
8390  {
8391  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8392  {
8393  *pSumFreeSize = suballocItem->size;
8394  }
8395  else
8396  {
8397  if(suballocItem->hAllocation->CanBecomeLost() &&
8398  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8399  {
8400  ++*itemsToMakeLostCount;
8401  *pSumItemSize = suballocItem->size;
8402  }
8403  else
8404  {
8405  return false;
8406  }
8407  }
8408 
8409  // Remaining size is too small for this request: Early return.
8410  if(GetSize() - suballocItem->offset < allocSize)
8411  {
8412  return false;
8413  }
8414 
8415  // Start from offset equal to beginning of this suballocation.
8416  *pOffset = suballocItem->offset;
8417 
8418  // Apply VMA_DEBUG_MARGIN at the beginning.
8419  if(VMA_DEBUG_MARGIN > 0)
8420  {
8421  *pOffset += VMA_DEBUG_MARGIN;
8422  }
8423 
8424  // Apply alignment.
8425  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8426 
8427  // Check previous suballocations for BufferImageGranularity conflicts.
8428  // Make bigger alignment if necessary.
8429  if(bufferImageGranularity > 1)
8430  {
8431  bool bufferImageGranularityConflict = false;
8432  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8433  while(prevSuballocItem != m_Suballocations.cbegin())
8434  {
8435  --prevSuballocItem;
8436  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8437  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8438  {
8439  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8440  {
8441  bufferImageGranularityConflict = true;
8442  break;
8443  }
8444  }
8445  else
8446  // Already on previous page.
8447  break;
8448  }
8449  if(bufferImageGranularityConflict)
8450  {
8451  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8452  }
8453  }
8454 
8455  // Now that we have final *pOffset, check if we are past suballocItem.
8456  // If yes, return false - this function should be called for another suballocItem as starting point.
8457  if(*pOffset >= suballocItem->offset + suballocItem->size)
8458  {
8459  return false;
8460  }
8461 
8462  // Calculate padding at the beginning based on current offset.
8463  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8464 
8465  // Calculate required margin at the end.
8466  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8467 
8468  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8469  // Another early return check.
8470  if(suballocItem->offset + totalSize > GetSize())
8471  {
8472  return false;
8473  }
8474 
8475  // Advance lastSuballocItem until desired size is reached.
8476  // Update itemsToMakeLostCount.
8477  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8478  if(totalSize > suballocItem->size)
8479  {
8480  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8481  while(remainingSize > 0)
8482  {
8483  ++lastSuballocItem;
8484  if(lastSuballocItem == m_Suballocations.cend())
8485  {
8486  return false;
8487  }
8488  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8489  {
8490  *pSumFreeSize += lastSuballocItem->size;
8491  }
8492  else
8493  {
8494  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8495  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8496  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8497  {
8498  ++*itemsToMakeLostCount;
8499  *pSumItemSize += lastSuballocItem->size;
8500  }
8501  else
8502  {
8503  return false;
8504  }
8505  }
8506  remainingSize = (lastSuballocItem->size < remainingSize) ?
8507  remainingSize - lastSuballocItem->size : 0;
8508  }
8509  }
8510 
8511  // Check next suballocations for BufferImageGranularity conflicts.
8512  // If conflict exists, we must mark more allocations lost or fail.
8513  if(bufferImageGranularity > 1)
8514  {
8515  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8516  ++nextSuballocItem;
8517  while(nextSuballocItem != m_Suballocations.cend())
8518  {
8519  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8520  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8521  {
8522  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8523  {
8524  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8525  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8526  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8527  {
8528  ++*itemsToMakeLostCount;
8529  }
8530  else
8531  {
8532  return false;
8533  }
8534  }
8535  }
8536  else
8537  {
8538  // Already on next page.
8539  break;
8540  }
8541  ++nextSuballocItem;
8542  }
8543  }
8544  }
8545  else
8546  {
8547  const VmaSuballocation& suballoc = *suballocItem;
8548  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8549 
8550  *pSumFreeSize = suballoc.size;
8551 
8552  // Size of this suballocation is too small for this request: Early return.
8553  if(suballoc.size < allocSize)
8554  {
8555  return false;
8556  }
8557 
8558  // Start from offset equal to beginning of this suballocation.
8559  *pOffset = suballoc.offset;
8560 
8561  // Apply VMA_DEBUG_MARGIN at the beginning.
8562  if(VMA_DEBUG_MARGIN > 0)
8563  {
8564  *pOffset += VMA_DEBUG_MARGIN;
8565  }
8566 
8567  // Apply alignment.
8568  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8569 
8570  // Check previous suballocations for BufferImageGranularity conflicts.
8571  // Make bigger alignment if necessary.
8572  if(bufferImageGranularity > 1)
8573  {
8574  bool bufferImageGranularityConflict = false;
8575  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8576  while(prevSuballocItem != m_Suballocations.cbegin())
8577  {
8578  --prevSuballocItem;
8579  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8580  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8581  {
8582  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8583  {
8584  bufferImageGranularityConflict = true;
8585  break;
8586  }
8587  }
8588  else
8589  // Already on previous page.
8590  break;
8591  }
8592  if(bufferImageGranularityConflict)
8593  {
8594  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8595  }
8596  }
8597 
8598  // Calculate padding at the beginning based on current offset.
8599  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8600 
8601  // Calculate required margin at the end.
8602  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8603 
8604  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8605  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8606  {
8607  return false;
8608  }
8609 
8610  // Check next suballocations for BufferImageGranularity conflicts.
8611  // If conflict exists, allocation cannot be made here.
8612  if(bufferImageGranularity > 1)
8613  {
8614  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8615  ++nextSuballocItem;
8616  while(nextSuballocItem != m_Suballocations.cend())
8617  {
8618  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8619  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8620  {
8621  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8622  {
8623  return false;
8624  }
8625  }
8626  else
8627  {
8628  // Already on next page.
8629  break;
8630  }
8631  ++nextSuballocItem;
8632  }
8633  }
8634  }
8635 
8636  // All tests passed: Success. pOffset is already filled.
8637  return true;
8638 }
8639 
8640 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8641 {
8642  VMA_ASSERT(item != m_Suballocations.end());
8643  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8644 
8645  VmaSuballocationList::iterator nextItem = item;
8646  ++nextItem;
8647  VMA_ASSERT(nextItem != m_Suballocations.end());
8648  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8649 
8650  item->size += nextItem->size;
8651  --m_FreeCount;
8652  m_Suballocations.erase(nextItem);
8653 }
8654 
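// FreeSuballocation marks the item free and coalesces it with free neighbors, so the
// list never holds two adjacent free items. Merging forward (MergeFreeWithNext) keeps
// the current iterator valid; when merging backward, the previous item becomes the
// representative of the merged range and is the iterator returned to the caller.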
8655 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8656 {
8657  // Change this suballocation to be marked as free.
8658  VmaSuballocation& suballoc = *suballocItem;
8659  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8660  suballoc.hAllocation = VK_NULL_HANDLE;
8661 
8662  // Update totals.
8663  ++m_FreeCount;
8664  m_SumFreeSize += suballoc.size;
8665 
8666  // Merge with previous and/or next suballocation if it's also free.
8667  bool mergeWithNext = false;
8668  bool mergeWithPrev = false;
8669 
8670  VmaSuballocationList::iterator nextItem = suballocItem;
8671  ++nextItem;
8672  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8673  {
8674  mergeWithNext = true;
8675  }
8676 
8677  VmaSuballocationList::iterator prevItem = suballocItem;
8678  if(suballocItem != m_Suballocations.begin())
8679  {
8680  --prevItem;
8681  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8682  {
8683  mergeWithPrev = true;
8684  }
8685  }
8686 
8687  if(mergeWithNext)
8688  {
8689  UnregisterFreeSuballocation(nextItem);
8690  MergeFreeWithNext(suballocItem);
8691  }
8692 
8693  if(mergeWithPrev)
8694  {
8695  UnregisterFreeSuballocation(prevItem);
8696  MergeFreeWithNext(prevItem);
8697  RegisterFreeSuballocation(prevItem);
8698  return prevItem;
8699  }
8700  else
8701  {
8702  RegisterFreeSuballocation(suballocItem);
8703  return suballocItem;
8704  }
8705 }
8706 
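// The two functions below maintain m_FreeSuballocationsBySize. Only free items of at
// least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes are indexed; smaller ones
// are found by plain list traversal when needed. Unregistration binary-searches to the
// first entry of equal size and then scans forward, since several free items may have
// the same size.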
8707 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8708 {
8709  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8710  VMA_ASSERT(item->size > 0);
8711 
8712  // You may want to enable this validation at the beginning or at the end of
8713  // this function, depending on what you want to check.
8714  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8715 
8716  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8717  {
8718  if(m_FreeSuballocationsBySize.empty())
8719  {
8720  m_FreeSuballocationsBySize.push_back(item);
8721  }
8722  else
8723  {
8724  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8725  }
8726  }
8727 
8728  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8729 }
8730 
8731 
8732 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8733 {
8734  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8735  VMA_ASSERT(item->size > 0);
8736 
8737  // You may want to enable this validation at the beginning or at the end of
8738  // this function, depending on what you want to check.
8739  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8740 
8741  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8742  {
8743  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8744  m_FreeSuballocationsBySize.data(),
8745  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8746  item,
8747  VmaSuballocationItemSizeLess());
8748  for(size_t index = it - m_FreeSuballocationsBySize.data();
8749  index < m_FreeSuballocationsBySize.size();
8750  ++index)
8751  {
8752  if(m_FreeSuballocationsBySize[index] == item)
8753  {
8754  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8755  return;
8756  }
8757  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8758  }
8759  VMA_ASSERT(0 && "Not found.");
8760  }
8761 
8762  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8763 }
8764 
8765 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8766  VkDeviceSize bufferImageGranularity,
8767  VmaSuballocationType& inOutPrevSuballocType) const
8768 {
8769  if(bufferImageGranularity == 1 || IsEmpty())
8770  {
8771  return false;
8772  }
8773 
8774  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8775  bool typeConflictFound = false;
8776  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8777  it != m_Suballocations.cend();
8778  ++it)
8779  {
8780  const VmaSuballocationType suballocType = it->type;
8781  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8782  {
8783  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8784  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8785  {
8786  typeConflictFound = true;
8787  }
8788  inOutPrevSuballocType = suballocType;
8789  }
8790  }
8791 
8792  return typeConflictFound || minAlignment >= bufferImageGranularity;
8793 }
8794 
8796 // class VmaBlockMetadata_Linear
8797 
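// VmaBlockMetadata_Linear keeps suballocations in up to two vectors sorted by offset
// instead of a list plus a free-size index. Rough illustration of the three modes of
// the 2nd vector (drawn only as a sketch, low addresses on the left):
//
//   SECOND_VECTOR_EMPTY:        |--1st-->....................|
//   SECOND_VECTOR_RING_BUFFER:  |--2nd-->....|--1st-->.......|   (2nd wrapped around)
//   SECOND_VECTOR_DOUBLE_STACK: |--1st-->............<--2nd--|   (2nd filled from the top)
//
// Freed allocations become null items that are only counted
// (m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount, m_2ndNullItemsCount) and
// trimmed from the vector ends; gaps in the middle are not reused.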
8798 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8799  VmaBlockMetadata(hAllocator),
8800  m_SumFreeSize(0),
8801  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8802  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8803  m_1stVectorIndex(0),
8804  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8805  m_1stNullItemsBeginCount(0),
8806  m_1stNullItemsMiddleCount(0),
8807  m_2ndNullItemsCount(0)
8808 {
8809 }
8810 
8811 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8812 {
8813 }
8814 
8815 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8816 {
8817  VmaBlockMetadata::Init(size);
8818  m_SumFreeSize = size;
8819 }
8820 
8821 bool VmaBlockMetadata_Linear::Validate() const
8822 {
8823  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8824  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8825 
8826  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8827  VMA_VALIDATE(!suballocations1st.empty() ||
8828  suballocations2nd.empty() ||
8829  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8830 
8831  if(!suballocations1st.empty())
8832  {
8833  // A null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
8834  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8835  // A null item at the end should have been removed with pop_back().
8836  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8837  }
8838  if(!suballocations2nd.empty())
8839  {
8840  // A null item at the end should have been removed with pop_back().
8841  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8842  }
8843 
8844  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8845  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8846 
8847  VkDeviceSize sumUsedSize = 0;
8848  const size_t suballoc1stCount = suballocations1st.size();
8849  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8850 
8851  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8852  {
8853  const size_t suballoc2ndCount = suballocations2nd.size();
8854  size_t nullItem2ndCount = 0;
8855  for(size_t i = 0; i < suballoc2ndCount; ++i)
8856  {
8857  const VmaSuballocation& suballoc = suballocations2nd[i];
8858  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8859 
8860  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8861  VMA_VALIDATE(suballoc.offset >= offset);
8862 
8863  if(!currFree)
8864  {
8865  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8866  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8867  sumUsedSize += suballoc.size;
8868  }
8869  else
8870  {
8871  ++nullItem2ndCount;
8872  }
8873 
8874  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8875  }
8876 
8877  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8878  }
8879 
8880  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8881  {
8882  const VmaSuballocation& suballoc = suballocations1st[i];
8883  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8884  suballoc.hAllocation == VK_NULL_HANDLE);
8885  }
8886 
8887  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8888 
8889  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8890  {
8891  const VmaSuballocation& suballoc = suballocations1st[i];
8892  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8893 
8894  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8895  VMA_VALIDATE(suballoc.offset >= offset);
8896  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8897 
8898  if(!currFree)
8899  {
8900  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8901  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8902  sumUsedSize += suballoc.size;
8903  }
8904  else
8905  {
8906  ++nullItem1stCount;
8907  }
8908 
8909  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8910  }
8911  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8912 
8913  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8914  {
8915  const size_t suballoc2ndCount = suballocations2nd.size();
8916  size_t nullItem2ndCount = 0;
8917  for(size_t i = suballoc2ndCount; i--; )
8918  {
8919  const VmaSuballocation& suballoc = suballocations2nd[i];
8920  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8921 
8922  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8923  VMA_VALIDATE(suballoc.offset >= offset);
8924 
8925  if(!currFree)
8926  {
8927  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8928  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8929  sumUsedSize += suballoc.size;
8930  }
8931  else
8932  {
8933  ++nullItem2ndCount;
8934  }
8935 
8936  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8937  }
8938 
8939  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8940  }
8941 
8942  VMA_VALIDATE(offset <= GetSize());
8943  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8944 
8945  return true;
8946 }
8947 
8948 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8949 {
8950  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8951  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8952 }
8953 
8954 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8955 {
8956  const VkDeviceSize size = GetSize();
8957 
8958  /*
8959  We don't consider gaps inside allocation vectors with freed allocations because
8960  they are not suitable for reuse in the linear allocator. We consider only space that
8961  is available for new allocations.
8962  */
8963  if(IsEmpty())
8964  {
8965  return size;
8966  }
8967 
8968  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8969 
8970  switch(m_2ndVectorMode)
8971  {
8972  case SECOND_VECTOR_EMPTY:
8973  /*
8974  Available space is after the end of 1st, as well as before the beginning of 1st
8975  (which would make it a ring buffer).
8976  */
8977  {
8978  const size_t suballocations1stCount = suballocations1st.size();
8979  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8980  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8981  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8982  return VMA_MAX(
8983  firstSuballoc.offset,
8984  size - (lastSuballoc.offset + lastSuballoc.size));
8985  }
8986  break;
8987 
8988  case SECOND_VECTOR_RING_BUFFER:
8989  /*
8990  Available space is only between end of 2nd and beginning of 1st.
8991  */
8992  {
8993  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8994  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8995  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8996  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8997  }
8998  break;
8999 
9000  case SECOND_VECTOR_DOUBLE_STACK:
9001  /*
9002  Available space is only between end of 1st and top of 2nd.
9003  */
9004  {
9005  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9006  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9007  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9008  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9009  }
9010  break;
9011 
9012  default:
9013  VMA_ASSERT(0);
9014  return 0;
9015  }
9016 }
9017 
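// CalcAllocationStatInfo walks the block in address order as up to three regions: the
// wrapped-around part of the 2nd vector (ring-buffer mode), then the 1st vector, then
// the top part of the 2nd vector (double-stack mode). Within each region it alternates
// between the free range preceding the next allocation and the allocation itself,
// skipping null items.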
9018 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9019 {
9020  const VkDeviceSize size = GetSize();
9021  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9022  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9023  const size_t suballoc1stCount = suballocations1st.size();
9024  const size_t suballoc2ndCount = suballocations2nd.size();
9025 
9026  outInfo.blockCount = 1;
9027  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9028  outInfo.unusedRangeCount = 0;
9029  outInfo.usedBytes = 0;
9030  outInfo.allocationSizeMin = UINT64_MAX;
9031  outInfo.allocationSizeMax = 0;
9032  outInfo.unusedRangeSizeMin = UINT64_MAX;
9033  outInfo.unusedRangeSizeMax = 0;
9034 
9035  VkDeviceSize lastOffset = 0;
9036 
9037  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9038  {
9039  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9040  size_t nextAlloc2ndIndex = 0;
9041  while(lastOffset < freeSpace2ndTo1stEnd)
9042  {
9043  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9044  while(nextAlloc2ndIndex < suballoc2ndCount &&
9045  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9046  {
9047  ++nextAlloc2ndIndex;
9048  }
9049 
9050  // Found non-null allocation.
9051  if(nextAlloc2ndIndex < suballoc2ndCount)
9052  {
9053  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9054 
9055  // 1. Process free space before this allocation.
9056  if(lastOffset < suballoc.offset)
9057  {
9058  // There is free space from lastOffset to suballoc.offset.
9059  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9060  ++outInfo.unusedRangeCount;
9061  outInfo.unusedBytes += unusedRangeSize;
9062  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9063  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9064  }
9065 
9066  // 2. Process this allocation.
9067  // There is allocation with suballoc.offset, suballoc.size.
9068  outInfo.usedBytes += suballoc.size;
9069  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9070  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9071 
9072  // 3. Prepare for next iteration.
9073  lastOffset = suballoc.offset + suballoc.size;
9074  ++nextAlloc2ndIndex;
9075  }
9076  // We are at the end.
9077  else
9078  {
9079  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9080  if(lastOffset < freeSpace2ndTo1stEnd)
9081  {
9082  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9083  ++outInfo.unusedRangeCount;
9084  outInfo.unusedBytes += unusedRangeSize;
9085  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9086  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9087  }
9088 
9089  // End of loop.
9090  lastOffset = freeSpace2ndTo1stEnd;
9091  }
9092  }
9093  }
9094 
9095  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9096  const VkDeviceSize freeSpace1stTo2ndEnd =
9097  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9098  while(lastOffset < freeSpace1stTo2ndEnd)
9099  {
9100  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9101  while(nextAlloc1stIndex < suballoc1stCount &&
9102  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9103  {
9104  ++nextAlloc1stIndex;
9105  }
9106 
9107  // Found non-null allocation.
9108  if(nextAlloc1stIndex < suballoc1stCount)
9109  {
9110  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9111 
9112  // 1. Process free space before this allocation.
9113  if(lastOffset < suballoc.offset)
9114  {
9115  // There is free space from lastOffset to suballoc.offset.
9116  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9117  ++outInfo.unusedRangeCount;
9118  outInfo.unusedBytes += unusedRangeSize;
9119  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9120  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9121  }
9122 
9123  // 2. Process this allocation.
9124  // There is allocation with suballoc.offset, suballoc.size.
9125  outInfo.usedBytes += suballoc.size;
9126  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9127  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9128 
9129  // 3. Prepare for next iteration.
9130  lastOffset = suballoc.offset + suballoc.size;
9131  ++nextAlloc1stIndex;
9132  }
9133  // We are at the end.
9134  else
9135  {
9136  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9137  if(lastOffset < freeSpace1stTo2ndEnd)
9138  {
9139  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9140  ++outInfo.unusedRangeCount;
9141  outInfo.unusedBytes += unusedRangeSize;
9142  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9143  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9144  }
9145 
9146  // End of loop.
9147  lastOffset = freeSpace1stTo2ndEnd;
9148  }
9149  }
9150 
9151  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9152  {
9153  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9154  while(lastOffset < size)
9155  {
9156  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9157  while(nextAlloc2ndIndex != SIZE_MAX &&
9158  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9159  {
9160  --nextAlloc2ndIndex;
9161  }
9162 
9163  // Found non-null allocation.
9164  if(nextAlloc2ndIndex != SIZE_MAX)
9165  {
9166  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9167 
9168  // 1. Process free space before this allocation.
9169  if(lastOffset < suballoc.offset)
9170  {
9171  // There is free space from lastOffset to suballoc.offset.
9172  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9173  ++outInfo.unusedRangeCount;
9174  outInfo.unusedBytes += unusedRangeSize;
9175  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9176  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9177  }
9178 
9179  // 2. Process this allocation.
9180  // There is allocation with suballoc.offset, suballoc.size.
9181  outInfo.usedBytes += suballoc.size;
9182  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9183  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9184 
9185  // 3. Prepare for next iteration.
9186  lastOffset = suballoc.offset + suballoc.size;
9187  --nextAlloc2ndIndex;
9188  }
9189  // We are at the end.
9190  else
9191  {
9192  // There is free space from lastOffset to size.
9193  if(lastOffset < size)
9194  {
9195  const VkDeviceSize unusedRangeSize = size - lastOffset;
9196  ++outInfo.unusedRangeCount;
9197  outInfo.unusedBytes += unusedRangeSize;
9198  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9199  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9200  }
9201 
9202  // End of loop.
9203  lastOffset = size;
9204  }
9205  }
9206  }
9207 
9208  outInfo.unusedBytes = size - outInfo.usedBytes;
9209 }
9210 
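// AddPoolStats performs the same three-region walk as CalcAllocationStatInfo above,
// but accumulates only pool-level counters: allocationCount, unusedSize,
// unusedRangeCount, and unusedRangeSizeMax.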
9211 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9212 {
9213  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9214  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9215  const VkDeviceSize size = GetSize();
9216  const size_t suballoc1stCount = suballocations1st.size();
9217  const size_t suballoc2ndCount = suballocations2nd.size();
9218 
9219  inoutStats.size += size;
9220 
9221  VkDeviceSize lastOffset = 0;
9222 
9223  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9224  {
9225  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9226  size_t nextAlloc2ndIndex = 0; // Indexes suballocations2nd, so it must start at 0.
9227  while(lastOffset < freeSpace2ndTo1stEnd)
9228  {
9229  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9230  while(nextAlloc2ndIndex < suballoc2ndCount &&
9231  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9232  {
9233  ++nextAlloc2ndIndex;
9234  }
9235 
9236  // Found non-null allocation.
9237  if(nextAlloc2ndIndex < suballoc2ndCount)
9238  {
9239  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9240 
9241  // 1. Process free space before this allocation.
9242  if(lastOffset < suballoc.offset)
9243  {
9244  // There is free space from lastOffset to suballoc.offset.
9245  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9246  inoutStats.unusedSize += unusedRangeSize;
9247  ++inoutStats.unusedRangeCount;
9248  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9249  }
9250 
9251  // 2. Process this allocation.
9252  // There is allocation with suballoc.offset, suballoc.size.
9253  ++inoutStats.allocationCount;
9254 
9255  // 3. Prepare for next iteration.
9256  lastOffset = suballoc.offset + suballoc.size;
9257  ++nextAlloc2ndIndex;
9258  }
9259  // We are at the end.
9260  else
9261  {
9262  if(lastOffset < freeSpace2ndTo1stEnd)
9263  {
9264  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9265  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9266  inoutStats.unusedSize += unusedRangeSize;
9267  ++inoutStats.unusedRangeCount;
9268  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9269  }
9270 
9271  // End of loop.
9272  lastOffset = freeSpace2ndTo1stEnd;
9273  }
9274  }
9275  }
9276 
9277  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9278  const VkDeviceSize freeSpace1stTo2ndEnd =
9279  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9280  while(lastOffset < freeSpace1stTo2ndEnd)
9281  {
9282  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9283  while(nextAlloc1stIndex < suballoc1stCount &&
9284  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9285  {
9286  ++nextAlloc1stIndex;
9287  }
9288 
9289  // Found non-null allocation.
9290  if(nextAlloc1stIndex < suballoc1stCount)
9291  {
9292  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9293 
9294  // 1. Process free space before this allocation.
9295  if(lastOffset < suballoc.offset)
9296  {
9297  // There is free space from lastOffset to suballoc.offset.
9298  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9299  inoutStats.unusedSize += unusedRangeSize;
9300  ++inoutStats.unusedRangeCount;
9301  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9302  }
9303 
9304  // 2. Process this allocation.
9305  // There is allocation with suballoc.offset, suballoc.size.
9306  ++inoutStats.allocationCount;
9307 
9308  // 3. Prepare for next iteration.
9309  lastOffset = suballoc.offset + suballoc.size;
9310  ++nextAlloc1stIndex;
9311  }
9312  // We are at the end.
9313  else
9314  {
9315  if(lastOffset < freeSpace1stTo2ndEnd)
9316  {
9317  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9318  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9319  inoutStats.unusedSize += unusedRangeSize;
9320  ++inoutStats.unusedRangeCount;
9321  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9322  }
9323 
9324  // End of loop.
9325  lastOffset = freeSpace1stTo2ndEnd;
9326  }
9327  }
9328 
9329  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9330  {
9331  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9332  while(lastOffset < size)
9333  {
9334  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9335  while(nextAlloc2ndIndex != SIZE_MAX &&
9336  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9337  {
9338  --nextAlloc2ndIndex;
9339  }
9340 
9341  // Found non-null allocation.
9342  if(nextAlloc2ndIndex != SIZE_MAX)
9343  {
9344  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9345 
9346  // 1. Process free space before this allocation.
9347  if(lastOffset < suballoc.offset)
9348  {
9349  // There is free space from lastOffset to suballoc.offset.
9350  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9351  inoutStats.unusedSize += unusedRangeSize;
9352  ++inoutStats.unusedRangeCount;
9353  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9354  }
9355 
9356  // 2. Process this allocation.
9357  // There is allocation with suballoc.offset, suballoc.size.
9358  ++inoutStats.allocationCount;
9359 
9360  // 3. Prepare for next iteration.
9361  lastOffset = suballoc.offset + suballoc.size;
9362  --nextAlloc2ndIndex;
9363  }
9364  // We are at the end.
9365  else
9366  {
9367  if(lastOffset < size)
9368  {
9369  // There is free space from lastOffset to size.
9370  const VkDeviceSize unusedRangeSize = size - lastOffset;
9371  inoutStats.unusedSize += unusedRangeSize;
9372  ++inoutStats.unusedRangeCount;
9373  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9374  }
9375 
9376  // End of loop.
9377  lastOffset = size;
9378  }
9379  }
9380  }
9381 }
9382 
9383 #if VMA_STATS_STRING_ENABLED
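// PrintDetailedMap repeats the three-region walk twice: the first pass only counts
// allocations, used bytes, and unused ranges, because PrintDetailedMap_Begin needs
// these totals up front; the second pass emits every allocation and unused range to
// the JSON writer.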
9384 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9385 {
9386  const VkDeviceSize size = GetSize();
9387  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9388  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9389  const size_t suballoc1stCount = suballocations1st.size();
9390  const size_t suballoc2ndCount = suballocations2nd.size();
9391 
9392  // FIRST PASS
9393 
9394  size_t unusedRangeCount = 0;
9395  VkDeviceSize usedBytes = 0;
9396 
9397  VkDeviceSize lastOffset = 0;
9398 
9399  size_t alloc2ndCount = 0;
9400  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9401  {
9402  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9403  size_t nextAlloc2ndIndex = 0;
9404  while(lastOffset < freeSpace2ndTo1stEnd)
9405  {
9406  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9407  while(nextAlloc2ndIndex < suballoc2ndCount &&
9408  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9409  {
9410  ++nextAlloc2ndIndex;
9411  }
9412 
9413  // Found non-null allocation.
9414  if(nextAlloc2ndIndex < suballoc2ndCount)
9415  {
9416  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9417 
9418  // 1. Process free space before this allocation.
9419  if(lastOffset < suballoc.offset)
9420  {
9421  // There is free space from lastOffset to suballoc.offset.
9422  ++unusedRangeCount;
9423  }
9424 
9425  // 2. Process this allocation.
9426  // There is allocation with suballoc.offset, suballoc.size.
9427  ++alloc2ndCount;
9428  usedBytes += suballoc.size;
9429 
9430  // 3. Prepare for next iteration.
9431  lastOffset = suballoc.offset + suballoc.size;
9432  ++nextAlloc2ndIndex;
9433  }
9434  // We are at the end.
9435  else
9436  {
9437  if(lastOffset < freeSpace2ndTo1stEnd)
9438  {
9439  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9440  ++unusedRangeCount;
9441  }
9442 
9443  // End of loop.
9444  lastOffset = freeSpace2ndTo1stEnd;
9445  }
9446  }
9447  }
9448 
9449  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9450  size_t alloc1stCount = 0;
9451  const VkDeviceSize freeSpace1stTo2ndEnd =
9452  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9453  while(lastOffset < freeSpace1stTo2ndEnd)
9454  {
9455  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9456  while(nextAlloc1stIndex < suballoc1stCount &&
9457  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9458  {
9459  ++nextAlloc1stIndex;
9460  }
9461 
9462  // Found non-null allocation.
9463  if(nextAlloc1stIndex < suballoc1stCount)
9464  {
9465  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9466 
9467  // 1. Process free space before this allocation.
9468  if(lastOffset < suballoc.offset)
9469  {
9470  // There is free space from lastOffset to suballoc.offset.
9471  ++unusedRangeCount;
9472  }
9473 
9474  // 2. Process this allocation.
9475  // There is allocation with suballoc.offset, suballoc.size.
9476  ++alloc1stCount;
9477  usedBytes += suballoc.size;
9478 
9479  // 3. Prepare for next iteration.
9480  lastOffset = suballoc.offset + suballoc.size;
9481  ++nextAlloc1stIndex;
9482  }
9483  // We are at the end.
9484  else
9485  {
9486  if(lastOffset < freeSpace1stTo2ndEnd)
9487  {
9488  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9489  ++unusedRangeCount;
9490  }
9491 
9492  // End of loop.
9493  lastOffset = freeSpace1stTo2ndEnd;
9494  }
9495  }
9496 
9497  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9498  {
9499  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9500  while(lastOffset < size)
9501  {
9502  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9503  while(nextAlloc2ndIndex != SIZE_MAX &&
9504  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9505  {
9506  --nextAlloc2ndIndex;
9507  }
9508 
9509  // Found non-null allocation.
9510  if(nextAlloc2ndIndex != SIZE_MAX)
9511  {
9512  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9513 
9514  // 1. Process free space before this allocation.
9515  if(lastOffset < suballoc.offset)
9516  {
9517  // There is free space from lastOffset to suballoc.offset.
9518  ++unusedRangeCount;
9519  }
9520 
9521  // 2. Process this allocation.
9522  // There is allocation with suballoc.offset, suballoc.size.
9523  ++alloc2ndCount;
9524  usedBytes += suballoc.size;
9525 
9526  // 3. Prepare for next iteration.
9527  lastOffset = suballoc.offset + suballoc.size;
9528  --nextAlloc2ndIndex;
9529  }
9530  // We are at the end.
9531  else
9532  {
9533  if(lastOffset < size)
9534  {
9535  // There is free space from lastOffset to size.
9536  ++unusedRangeCount;
9537  }
9538 
9539  // End of loop.
9540  lastOffset = size;
9541  }
9542  }
9543  }
9544 
9545  const VkDeviceSize unusedBytes = size - usedBytes;
9546  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9547 
9548  // SECOND PASS
9549  lastOffset = 0;
9550 
9551  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9552  {
9553  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9554  size_t nextAlloc2ndIndex = 0;
9555  while(lastOffset < freeSpace2ndTo1stEnd)
9556  {
9557  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9558  while(nextAlloc2ndIndex < suballoc2ndCount &&
9559  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9560  {
9561  ++nextAlloc2ndIndex;
9562  }
9563 
9564  // Found non-null allocation.
9565  if(nextAlloc2ndIndex < suballoc2ndCount)
9566  {
9567  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9568 
9569  // 1. Process free space before this allocation.
9570  if(lastOffset < suballoc.offset)
9571  {
9572  // There is free space from lastOffset to suballoc.offset.
9573  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9574  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9575  }
9576 
9577  // 2. Process this allocation.
9578  // There is allocation with suballoc.offset, suballoc.size.
9579  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9580 
9581  // 3. Prepare for next iteration.
9582  lastOffset = suballoc.offset + suballoc.size;
9583  ++nextAlloc2ndIndex;
9584  }
9585  // We are at the end.
9586  else
9587  {
9588  if(lastOffset < freeSpace2ndTo1stEnd)
9589  {
9590  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9591  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9592  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9593  }
9594 
9595  // End of loop.
9596  lastOffset = freeSpace2ndTo1stEnd;
9597  }
9598  }
9599  }
9600 
9601  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9602  while(lastOffset < freeSpace1stTo2ndEnd)
9603  {
9604  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9605  while(nextAlloc1stIndex < suballoc1stCount &&
9606  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9607  {
9608  ++nextAlloc1stIndex;
9609  }
9610 
9611  // Found non-null allocation.
9612  if(nextAlloc1stIndex < suballoc1stCount)
9613  {
9614  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9615 
9616  // 1. Process free space before this allocation.
9617  if(lastOffset < suballoc.offset)
9618  {
9619  // There is free space from lastOffset to suballoc.offset.
9620  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9621  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9622  }
9623 
9624  // 2. Process this allocation.
9625  // There is allocation with suballoc.offset, suballoc.size.
9626  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9627 
9628  // 3. Prepare for next iteration.
9629  lastOffset = suballoc.offset + suballoc.size;
9630  ++nextAlloc1stIndex;
9631  }
9632  // We are at the end.
9633  else
9634  {
9635  if(lastOffset < freeSpace1stTo2ndEnd)
9636  {
9637  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9638  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9640  }
9641 
9642  // End of loop.
9643  lastOffset = freeSpace1stTo2ndEnd;
9644  }
9645  }
9646 
9647  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9648  {
9649  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9650  while(lastOffset < size)
9651  {
9652  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9653  while(nextAlloc2ndIndex != SIZE_MAX &&
9654  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9655  {
9656  --nextAlloc2ndIndex;
9657  }
9658 
9659  // Found non-null allocation.
9660  if(nextAlloc2ndIndex != SIZE_MAX)
9661  {
9662  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9663 
9664  // 1. Process free space before this allocation.
9665  if(lastOffset < suballoc.offset)
9666  {
9667  // There is free space from lastOffset to suballoc.offset.
9668  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9669  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9670  }
9671 
9672  // 2. Process this allocation.
9673  // There is allocation with suballoc.offset, suballoc.size.
9674  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9675 
9676  // 3. Prepare for next iteration.
9677  lastOffset = suballoc.offset + suballoc.size;
9678  --nextAlloc2ndIndex;
9679  }
9680  // We are at the end.
9681  else
9682  {
9683  if(lastOffset < size)
9684  {
9685  // There is free space from lastOffset to size.
9686  const VkDeviceSize unusedRangeSize = size - lastOffset;
9687  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9688  }
9689 
9690  // End of loop.
9691  lastOffset = size;
9692  }
9693  }
9694  }
9695 
9696  PrintDetailedMap_End(json);
9697 }
9698 #endif // #if VMA_STATS_STRING_ENABLED
9699 
9700 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9701  uint32_t currentFrameIndex,
9702  uint32_t frameInUseCount,
9703  VkDeviceSize bufferImageGranularity,
9704  VkDeviceSize allocSize,
9705  VkDeviceSize allocAlignment,
9706  bool upperAddress,
9707  VmaSuballocationType allocType,
9708  bool canMakeOtherLost,
9709  uint32_t strategy,
9710  VmaAllocationRequest* pAllocationRequest)
9711 {
9712  VMA_ASSERT(allocSize > 0);
9713  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9714  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9715  VMA_HEAVY_ASSERT(Validate());
9716  return upperAddress ?
9717  CreateAllocationRequest_UpperAddress(
9718  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9719  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9720  CreateAllocationRequest_LowerAddress(
9721  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9722  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9723 }
9724 
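// Upper-address requests implement the double-stack side: the candidate offset starts
// at the top of the remaining free space (below 2nd.back(), or at the end of the
// block) and is adjusted downward for VMA_DEBUG_MARGIN, alignment (VmaAlignDown, the
// mirror image of the VmaAlignUp used for lower addresses), and bufferImageGranularity,
// then validated against the end of the 1st vector.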
9725 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9726  uint32_t currentFrameIndex,
9727  uint32_t frameInUseCount,
9728  VkDeviceSize bufferImageGranularity,
9729  VkDeviceSize allocSize,
9730  VkDeviceSize allocAlignment,
9731  VmaSuballocationType allocType,
9732  bool canMakeOtherLost,
9733  uint32_t strategy,
9734  VmaAllocationRequest* pAllocationRequest)
9735 {
9736  const VkDeviceSize size = GetSize();
9737  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9738  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9739 
9740  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9741  {
9742  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9743  return false;
9744  }
9745 
9746  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9747  if(allocSize > size)
9748  {
9749  return false;
9750  }
9751  VkDeviceSize resultBaseOffset = size - allocSize;
9752  if(!suballocations2nd.empty())
9753  {
9754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9755  resultBaseOffset = lastSuballoc.offset - allocSize;
9756  if(allocSize > lastSuballoc.offset)
9757  {
9758  return false;
9759  }
9760  }
9761 
9762  // Start from offset equal to end of free space.
9763  VkDeviceSize resultOffset = resultBaseOffset;
9764 
9765  // Apply VMA_DEBUG_MARGIN at the end.
9766  if(VMA_DEBUG_MARGIN > 0)
9767  {
9768  if(resultOffset < VMA_DEBUG_MARGIN)
9769  {
9770  return false;
9771  }
9772  resultOffset -= VMA_DEBUG_MARGIN;
9773  }
9774 
9775  // Apply alignment.
9776  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9777 
9778  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9779  // Make bigger alignment if necessary.
9780  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9781  {
9782  bool bufferImageGranularityConflict = false;
9783  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9784  {
9785  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9786  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9787  {
9788  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9789  {
9790  bufferImageGranularityConflict = true;
9791  break;
9792  }
9793  }
9794  else
9795  // Already on previous page.
9796  break;
9797  }
9798  if(bufferImageGranularityConflict)
9799  {
9800  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9801  }
9802  }
9803 
9804  // There is enough free space.
9805  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9806  suballocations1st.back().offset + suballocations1st.back().size :
9807  0;
9808  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9809  {
9810  // Check previous suballocations for BufferImageGranularity conflicts.
9811  // If conflict exists, allocation cannot be made here.
9812  if(bufferImageGranularity > 1)
9813  {
9814  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9815  {
9816  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9817  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9818  {
9819  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9820  {
9821  return false;
9822  }
9823  }
9824  else
9825  {
9826  // Already on next page.
9827  break;
9828  }
9829  }
9830  }
9831 
9832  // All tests passed: Success.
9833  pAllocationRequest->offset = resultOffset;
9834  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9835  pAllocationRequest->sumItemSize = 0;
9836  // pAllocationRequest->item unused.
9837  pAllocationRequest->itemsToMakeLostCount = 0;
9838  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9839  return true;
9840  }
9841 
9842  return false;
9843 }
9844 
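// Lower-address requests try two placements: first at the end of the 1st vector
// (unless the block already runs as a ring buffer), and failing that, wrapped around
// at the low end of the block after the 2nd vector, which turns the block into a ring
// buffer. The wrapped placement may additionally mark allocations at the beginning of
// the 1st vector as lost when canMakeOtherLost allows it.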
9845 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9846  uint32_t currentFrameIndex,
9847  uint32_t frameInUseCount,
9848  VkDeviceSize bufferImageGranularity,
9849  VkDeviceSize allocSize,
9850  VkDeviceSize allocAlignment,
9851  VmaSuballocationType allocType,
9852  bool canMakeOtherLost,
9853  uint32_t strategy,
9854  VmaAllocationRequest* pAllocationRequest)
9855 {
9856  const VkDeviceSize size = GetSize();
9857  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9858  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9859 
9860  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9861  {
9862  // Try to allocate at the end of 1st vector.
9863 
9864  VkDeviceSize resultBaseOffset = 0;
9865  if(!suballocations1st.empty())
9866  {
9867  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9868  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9869  }
9870 
9871  // Start from offset equal to beginning of free space.
9872  VkDeviceSize resultOffset = resultBaseOffset;
9873 
9874  // Apply VMA_DEBUG_MARGIN at the beginning.
9875  if(VMA_DEBUG_MARGIN > 0)
9876  {
9877  resultOffset += VMA_DEBUG_MARGIN;
9878  }
9879 
9880  // Apply alignment.
9881  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9882 
9883  // Check previous suballocations for BufferImageGranularity conflicts.
9884  // Make bigger alignment if necessary.
9885  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9886  {
9887  bool bufferImageGranularityConflict = false;
9888  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9889  {
9890  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9891  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9892  {
9893  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9894  {
9895  bufferImageGranularityConflict = true;
9896  break;
9897  }
9898  }
9899  else
9900  // Already on previous page.
9901  break;
9902  }
9903  if(bufferImageGranularityConflict)
9904  {
9905  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9906  }
9907  }
9908 
9909  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9910  suballocations2nd.back().offset : size;
9911 
9912  // There is enough free space at the end after alignment.
9913  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9914  {
9915  // Check next suballocations for BufferImageGranularity conflicts.
9916  // If conflict exists, allocation cannot be made here.
9917  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9918  {
9919  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9920  {
9921  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9922  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9923  {
9924  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9925  {
9926  return false;
9927  }
9928  }
9929  else
9930  {
9931  // Already on previous page.
9932  break;
9933  }
9934  }
9935  }
9936 
9937  // All tests passed: Success.
9938  pAllocationRequest->offset = resultOffset;
9939  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9940  pAllocationRequest->sumItemSize = 0;
9941  // pAllocationRequest->item, customData unused.
9942  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9943  pAllocationRequest->itemsToMakeLostCount = 0;
9944  return true;
9945  }
9946  }
9947 
9948  // Wrap around to the end of the 2nd vector. Try to allocate there, treating the
9949  // beginning of the 1st vector as the end of free space.
9950  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9951  {
9952  VMA_ASSERT(!suballocations1st.empty());
9953 
9954  VkDeviceSize resultBaseOffset = 0;
9955  if(!suballocations2nd.empty())
9956  {
9957  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9958  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9959  }
9960 
9961  // Start from offset equal to beginning of free space.
9962  VkDeviceSize resultOffset = resultBaseOffset;
9963 
9964  // Apply VMA_DEBUG_MARGIN at the beginning.
9965  if(VMA_DEBUG_MARGIN > 0)
9966  {
9967  resultOffset += VMA_DEBUG_MARGIN;
9968  }
9969 
9970  // Apply alignment.
9971  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9972 
9973  // Check previous suballocations for BufferImageGranularity conflicts.
9974  // Make bigger alignment if necessary.
9975  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9976  {
9977  bool bufferImageGranularityConflict = false;
9978  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9979  {
9980  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9981  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9982  {
9983  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9984  {
9985  bufferImageGranularityConflict = true;
9986  break;
9987  }
9988  }
9989  else
9990  // Already on previous page.
9991  break;
9992  }
9993  if(bufferImageGranularityConflict)
9994  {
9995  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9996  }
9997  }
9998 
9999  pAllocationRequest->itemsToMakeLostCount = 0;
10000  pAllocationRequest->sumItemSize = 0;
10001  size_t index1st = m_1stNullItemsBeginCount;
10002 
10003  if(canMakeOtherLost)
10004  {
10005  while(index1st < suballocations1st.size() &&
10006  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10007  {
10008  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10009  const VmaSuballocation& suballoc = suballocations1st[index1st];
10010  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10011  {
10012  // No problem.
10013  }
10014  else
10015  {
10016  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10017  if(suballoc.hAllocation->CanBecomeLost() &&
10018  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10019  {
10020  ++pAllocationRequest->itemsToMakeLostCount;
10021  pAllocationRequest->sumItemSize += suballoc.size;
10022  }
10023  else
10024  {
10025  return false;
10026  }
10027  }
10028  ++index1st;
10029  }
10030 
10031  // Check next suballocations for BufferImageGranularity conflicts.
10032  // If conflict exists, we must mark more allocations lost or fail.
10033  if(bufferImageGranularity > 1)
10034  {
10035  while(index1st < suballocations1st.size())
10036  {
10037  const VmaSuballocation& suballoc = suballocations1st[index1st];
10038  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10039  {
10040  if(suballoc.hAllocation != VK_NULL_HANDLE)
10041  {
10042  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10043  if(suballoc.hAllocation->CanBecomeLost() &&
10044  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10045  {
10046  ++pAllocationRequest->itemsToMakeLostCount;
10047  pAllocationRequest->sumItemSize += suballoc.size;
10048  }
10049  else
10050  {
10051  return false;
10052  }
10053  }
10054  }
10055  else
10056  {
10057  // Already on next page.
10058  break;
10059  }
10060  ++index1st;
10061  }
10062  }
10063 
10064  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10065  if(index1st == suballocations1st.size() &&
10066  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10067  {
10068  // TODO: Known limitation, not yet implemented: this case is unsupported and the allocation fails.
10069  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10070  }
10071  }
10072 
10073  // There is enough free space at the end after alignment.
10074  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10075  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10076  {
10077  // Check next suballocations for BufferImageGranularity conflicts.
10078  // If conflict exists, allocation cannot be made here.
10079  if(bufferImageGranularity > 1)
10080  {
10081  for(size_t nextSuballocIndex = index1st;
10082  nextSuballocIndex < suballocations1st.size();
10083  nextSuballocIndex++)
10084  {
10085  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10086  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10087  {
10088  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10089  {
10090  return false;
10091  }
10092  }
10093  else
10094  {
10095  // Already on next page.
10096  break;
10097  }
10098  }
10099  }
10100 
10101  // All tests passed: Success.
10102  pAllocationRequest->offset = resultOffset;
10103  pAllocationRequest->sumFreeSize =
10104  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10105  - resultBaseOffset
10106  - pAllocationRequest->sumItemSize;
10107  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10108  // pAllocationRequest->item, customData unused.
10109  return true;
10110  }
10111  }
10112 
10113  return false;
10114 }
10115 
10116 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10117  uint32_t currentFrameIndex,
10118  uint32_t frameInUseCount,
10119  VmaAllocationRequest* pAllocationRequest)
10120 {
10121  if(pAllocationRequest->itemsToMakeLostCount == 0)
10122  {
10123  return true;
10124  }
10125 
10126  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10127 
10128  // We always start from 1st.
10129  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10130  size_t index = m_1stNullItemsBeginCount;
10131  size_t madeLostCount = 0;
10132  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10133  {
10134  if(index == suballocations->size())
10135  {
10136  index = 0;
10137  // If we reach the end of 1st vector, wrap around: to the beginning of 2nd vector (ring buffer), or back to the beginning of 1st.
10138  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10139  {
10140  suballocations = &AccessSuballocations2nd();
10141  }
10142  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10143  // suballocations continues pointing at AccessSuballocations1st().
10144  VMA_ASSERT(!suballocations->empty());
10145  }
10146  VmaSuballocation& suballoc = (*suballocations)[index];
10147  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10148  {
10149  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10150  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10151  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10152  {
10153  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10154  suballoc.hAllocation = VK_NULL_HANDLE;
10155  m_SumFreeSize += suballoc.size;
10156  if(suballocations == &AccessSuballocations1st())
10157  {
10158  ++m_1stNullItemsMiddleCount;
10159  }
10160  else
10161  {
10162  ++m_2ndNullItemsCount;
10163  }
10164  ++madeLostCount;
10165  }
10166  else
10167  {
10168  return false;
10169  }
10170  }
10171  ++index;
10172  }
10173 
10174  CleanupAfterFree();
10175  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10176 
10177  return true;
10178 }
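
// Illustrative sketch (not part of the library): how the lost-allocation
// machinery above is driven from the public API declared earlier in this file.
// Assumes the allocator was created with a nonzero frameInUseCount; error
// handling is omitted for brevity.
static void ExampleUseLostAllocations(VmaAllocator allocator, uint32_t frameIndex)
{
    // Advance the frame index once per frame so that the
    // GetLastUseFrameIndex() + frameInUseCount comparisons above can expire allocations.
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Opt into the protocol implemented by MakeRequestedAllocationsLost() above.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);

    // Before using the buffer in a new frame, touch the allocation;
    // VK_FALSE means it has been made lost and must be recreated.
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Recreate the resource and refill its contents here.
    }
}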
10179 
10180 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10181 {
10182  uint32_t lostAllocationCount = 0;
10183 
10184  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10185  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10186  {
10187  VmaSuballocation& suballoc = suballocations1st[i];
10188  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10189  suballoc.hAllocation->CanBecomeLost() &&
10190  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10191  {
10192  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10193  suballoc.hAllocation = VK_NULL_HANDLE;
10194  ++m_1stNullItemsMiddleCount;
10195  m_SumFreeSize += suballoc.size;
10196  ++lostAllocationCount;
10197  }
10198  }
10199 
10200  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10201  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10202  {
10203  VmaSuballocation& suballoc = suballocations2nd[i];
10204  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10205  suballoc.hAllocation->CanBecomeLost() &&
10206  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10207  {
10208  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10209  suballoc.hAllocation = VK_NULL_HANDLE;
10210  ++m_2ndNullItemsCount;
10211  m_SumFreeSize += suballoc.size;
10212  ++lostAllocationCount;
10213  }
10214  }
10215 
10216  if(lostAllocationCount)
10217  {
10218  CleanupAfterFree();
10219  }
10220 
10221  return lostAllocationCount;
10222 }
10223 
10224 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10225 {
10226  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10227  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10228  {
10229  const VmaSuballocation& suballoc = suballocations1st[i];
10230  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10231  {
10232  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10233  {
10234  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10235  return VK_ERROR_VALIDATION_FAILED_EXT;
10236  }
10237  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10238  {
10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10240  return VK_ERROR_VALIDATION_FAILED_EXT;
10241  }
10242  }
10243  }
10244 
10245  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10246  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10247  {
10248  const VmaSuballocation& suballoc = suballocations2nd[i];
10249  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10250  {
10251  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10252  {
10253  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10254  return VK_ERROR_VALIDATION_FAILED_EXT;
10255  }
10256  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10257  {
10258  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10259  return VK_ERROR_VALIDATION_FAILED_EXT;
10260  }
10261  }
10262  }
10263 
10264  return VK_SUCCESS;
10265 }
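
// Illustrative sketch (not part of the library): CheckCorruption() above only
// verifies the magic values; for it to be reachable, margins and corruption
// detection must be enabled at compile time before including this header:
//
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
// At runtime, validation of all HOST_VISIBLE + HOST_COHERENT memory can then be
// requested through the public API (UINT32_MAX = check every memory type):
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
//     // res == VK_ERROR_VALIDATION_FAILED_EXT indicates an overwritten margin.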
10266 
10267 void VmaBlockMetadata_Linear::Alloc(
10268  const VmaAllocationRequest& request,
10269  VmaSuballocationType type,
10270  VkDeviceSize allocSize,
10271  VmaAllocation hAllocation)
10272 {
10273  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10274 
10275  switch(request.type)
10276  {
10277  case VmaAllocationRequestType::UpperAddress:
10278  {
10279  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10280  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10281  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10282  suballocations2nd.push_back(newSuballoc);
10283  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10284  }
10285  break;
10286  case VmaAllocationRequestType::EndOf1st:
10287  {
10288  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10289 
10290  VMA_ASSERT(suballocations1st.empty() ||
10291  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10292  // Check if it fits before the end of the block.
10293  VMA_ASSERT(request.offset + allocSize <= GetSize());
10294 
10295  suballocations1st.push_back(newSuballoc);
10296  }
10297  break;
10298  case VmaAllocationRequestType::EndOf2nd:
10299  {
10300  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10301  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10302  VMA_ASSERT(!suballocations1st.empty() &&
10303  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10304  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10305 
10306  switch(m_2ndVectorMode)
10307  {
10308  case SECOND_VECTOR_EMPTY:
10309  // First allocation from second part ring buffer.
10310  VMA_ASSERT(suballocations2nd.empty());
10311  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10312  break;
10313  case SECOND_VECTOR_RING_BUFFER:
10314  // 2-part ring buffer is already started.
10315  VMA_ASSERT(!suballocations2nd.empty());
10316  break;
10317  case SECOND_VECTOR_DOUBLE_STACK:
10318  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10319  break;
10320  default:
10321  VMA_ASSERT(0);
10322  }
10323 
10324  suballocations2nd.push_back(newSuballoc);
10325  }
10326  break;
10327  default:
10328  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10329  }
10330 
10331  m_SumFreeSize -= newSuballoc.size;
10332 }
10333 
10334 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10335 {
10336  FreeAtOffset(allocation->GetOffset());
10337 }
10338 
10339 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10340 {
10341  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10342  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10343 
10344  if(!suballocations1st.empty())
10345  {
10346  // If this is the first (oldest) allocation in 1st vector: mark it as free and extend the empty prefix.
10347  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10348  if(firstSuballoc.offset == offset)
10349  {
10350  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10351  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10352  m_SumFreeSize += firstSuballoc.size;
10353  ++m_1stNullItemsBeginCount;
10354  CleanupAfterFree();
10355  return;
10356  }
10357  }
10358 
10359  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10360  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10361  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10362  {
10363  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10364  if(lastSuballoc.offset == offset)
10365  {
10366  m_SumFreeSize += lastSuballoc.size;
10367  suballocations2nd.pop_back();
10368  CleanupAfterFree();
10369  return;
10370  }
10371  }
10372  // Last allocation in 1st vector.
10373  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10374  {
10375  VmaSuballocation& lastSuballoc = suballocations1st.back();
10376  if(lastSuballoc.offset == offset)
10377  {
10378  m_SumFreeSize += lastSuballoc.size;
10379  suballocations1st.pop_back();
10380  CleanupAfterFree();
10381  return;
10382  }
10383  }
10384 
10385  // Item from the middle of 1st vector.
10386  {
10387  VmaSuballocation refSuballoc;
10388  refSuballoc.offset = offset;
10389  // The rest of the members stay intentionally uninitialized for better performance.
10390  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10391  suballocations1st.begin() + m_1stNullItemsBeginCount,
10392  suballocations1st.end(),
10393  refSuballoc);
10394  if(it != suballocations1st.end())
10395  {
10396  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10397  it->hAllocation = VK_NULL_HANDLE;
10398  ++m_1stNullItemsMiddleCount;
10399  m_SumFreeSize += it->size;
10400  CleanupAfterFree();
10401  return;
10402  }
10403  }
10404 
10405  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10406  {
10407  // Item from the middle of 2nd vector.
10408  VmaSuballocation refSuballoc;
10409  refSuballoc.offset = offset;
10410  // The rest of the members stay intentionally uninitialized for better performance.
10411  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10412  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10413  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10414  if(it != suballocations2nd.end())
10415  {
10416  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10417  it->hAllocation = VK_NULL_HANDLE;
10418  ++m_2ndNullItemsCount;
10419  m_SumFreeSize += it->size;
10420  CleanupAfterFree();
10421  return;
10422  }
10423  }
10424 
10425  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10426 }
10427 
10428 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10429 {
10430  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10431  const size_t suballocCount = AccessSuballocations1st().size();
10432  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10433 }
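
// Note (added for clarity): the predicate above fires when null items dominate.
// nullItemCount * 2 >= (suballocCount - nullItemCount) * 3 is algebraically
// equivalent to nullItemCount >= 60% of suballocCount, so compaction runs only
// when the vector has more than 32 entries and at least 3 out of every 5 are free.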
10434 
10435 void VmaBlockMetadata_Linear::CleanupAfterFree()
10436 {
10437  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10438  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10439 
10440  if(IsEmpty())
10441  {
10442  suballocations1st.clear();
10443  suballocations2nd.clear();
10444  m_1stNullItemsBeginCount = 0;
10445  m_1stNullItemsMiddleCount = 0;
10446  m_2ndNullItemsCount = 0;
10447  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10448  }
10449  else
10450  {
10451  const size_t suballoc1stCount = suballocations1st.size();
10452  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10453  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10454 
10455  // Find more null items at the beginning of 1st vector.
10456  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10457  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10458  {
10459  ++m_1stNullItemsBeginCount;
10460  --m_1stNullItemsMiddleCount;
10461  }
10462 
10463  // Find more null items at the end of 1st vector.
10464  while(m_1stNullItemsMiddleCount > 0 &&
10465  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10466  {
10467  --m_1stNullItemsMiddleCount;
10468  suballocations1st.pop_back();
10469  }
10470 
10471  // Find more null items at the end of 2nd vector.
10472  while(m_2ndNullItemsCount > 0 &&
10473  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10474  {
10475  --m_2ndNullItemsCount;
10476  suballocations2nd.pop_back();
10477  }
10478 
10479  // Find more null items at the beginning of 2nd vector.
10480  while(m_2ndNullItemsCount > 0 &&
10481  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10482  {
10483  --m_2ndNullItemsCount;
10484  suballocations2nd.remove(0);
10485  }
10486 
10487  if(ShouldCompact1st())
10488  {
10489  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10490  size_t srcIndex = m_1stNullItemsBeginCount;
10491  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10492  {
10493  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10494  {
10495  ++srcIndex;
10496  }
10497  if(dstIndex != srcIndex)
10498  {
10499  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10500  }
10501  ++srcIndex;
10502  }
10503  suballocations1st.resize(nonNullItemCount);
10504  m_1stNullItemsBeginCount = 0;
10505  m_1stNullItemsMiddleCount = 0;
10506  }
10507 
10508  // 2nd vector became empty.
10509  if(suballocations2nd.empty())
10510  {
10511  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10512  }
10513 
10514  // 1st vector became empty.
10515  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10516  {
10517  suballocations1st.clear();
10518  m_1stNullItemsBeginCount = 0;
10519 
10520  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10521  {
10522  // Swap 1st with 2nd. Now 2nd is empty.
10523  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10524  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10525  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10526  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10527  {
10528  ++m_1stNullItemsBeginCount;
10529  --m_1stNullItemsMiddleCount;
10530  }
10531  m_2ndNullItemsCount = 0;
10532  m_1stVectorIndex ^= 1;
10533  }
10534  }
10535  }
10536 
10537  VMA_HEAVY_ASSERT(Validate());
10538 }
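
// Illustrative sketch (not part of the library): creating a custom pool that
// uses VmaBlockMetadata_Linear, via the public API declared earlier in this
// file. memoryTypeIndex would normally come from vmaFindMemoryTypeIndex();
// the sizes are placeholder values.
static VkResult ExampleCreateLinearPool(VmaAllocator allocator, uint32_t memoryTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    // Selects the linear metadata implemented above: depending on the pattern of
    // allocations and frees it behaves as a stack, a ring buffer, or a double stack.
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // One fixed 64 MiB block.
    poolCreateInfo.maxBlockCount = 1; // The linear algorithm operates on a single block.
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}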
10539 
10540 
10541 ////////////////////////////////////////////////////////////////////////////////
10542 // class VmaBlockMetadata_Buddy
10543 
10544 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10545  VmaBlockMetadata(hAllocator),
10546  m_Root(VMA_NULL),
10547  m_AllocationCount(0),
10548  m_FreeCount(1),
10549  m_SumFreeSize(0)
10550 {
10551  memset(m_FreeList, 0, sizeof(m_FreeList));
10552 }
10553 
10554 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10555 {
10556  DeleteNode(m_Root);
10557 }
10558 
10559 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10560 {
10561  VmaBlockMetadata::Init(size);
10562 
10563  m_UsableSize = VmaPrevPow2(size);
10564  m_SumFreeSize = m_UsableSize;
10565 
10566  // Calculate m_LevelCount.
10567  m_LevelCount = 1;
10568  while(m_LevelCount < MAX_LEVELS &&
10569  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10570  {
10571  ++m_LevelCount;
10572  }
10573 
10574  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10575  rootNode->offset = 0;
10576  rootNode->type = Node::TYPE_FREE;
10577  rootNode->parent = VMA_NULL;
10578  rootNode->buddy = VMA_NULL;
10579 
10580  m_Root = rootNode;
10581  AddToFreeListFront(0, rootNode);
10582 }
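
// Illustrative sketch (not part of the library): a companion pool for the buddy
// metadata. blockSize should be a power of 2, because Init() above rounds the
// usable size down with VmaPrevPow2() and reports any remainder as unusable.
static VkResult ExampleCreateBuddyPool(VmaAllocator allocator, uint32_t memoryTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // Power of 2: fully usable.
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}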
10583 
10584 bool VmaBlockMetadata_Buddy::Validate() const
10585 {
10586  // Validate tree.
10587  ValidationContext ctx;
10588  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10589  {
10590  VMA_VALIDATE(false && "ValidateNode failed.");
10591  }
10592  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10593  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10594 
10595  // Validate free node lists.
10596  for(uint32_t level = 0; level < m_LevelCount; ++level)
10597  {
10598  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10599  m_FreeList[level].front->free.prev == VMA_NULL);
10600 
10601  for(Node* node = m_FreeList[level].front;
10602  node != VMA_NULL;
10603  node = node->free.next)
10604  {
10605  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10606 
10607  if(node->free.next == VMA_NULL)
10608  {
10609  VMA_VALIDATE(m_FreeList[level].back == node);
10610  }
10611  else
10612  {
10613  VMA_VALIDATE(node->free.next->free.prev == node);
10614  }
10615  }
10616  }
10617 
10618  // Validate that free lists at higher levels are empty.
10619  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10620  {
10621  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10622  }
10623 
10624  return true;
10625 }
10626 
10627 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10628 {
10629  for(uint32_t level = 0; level < m_LevelCount; ++level)
10630  {
10631  if(m_FreeList[level].front != VMA_NULL)
10632  {
10633  return LevelToNodeSize(level);
10634  }
10635  }
10636  return 0;
10637 }
10638 
10639 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10640 {
10641  const VkDeviceSize unusableSize = GetUnusableSize();
10642 
10643  outInfo.blockCount = 1;
10644 
10645  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10646  outInfo.usedBytes = outInfo.unusedBytes = 0;
10647 
10648  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10649  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10650  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10651 
10652  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10653 
10654  if(unusableSize > 0)
10655  {
10656  ++outInfo.unusedRangeCount;
10657  outInfo.unusedBytes += unusableSize;
10658  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10659  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10660  }
10661 }
10662 
10663 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10664 {
10665  const VkDeviceSize unusableSize = GetUnusableSize();
10666 
10667  inoutStats.size += GetSize();
10668  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10669  inoutStats.allocationCount += m_AllocationCount;
10670  inoutStats.unusedRangeCount += m_FreeCount;
10671  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10672 
10673  if(unusableSize > 0)
10674  {
10675  ++inoutStats.unusedRangeCount;
10676  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10677  }
10678 }
10679 
10680 #if VMA_STATS_STRING_ENABLED
10681 
10682 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10683 {
10684  // TODO optimize
10685  VmaStatInfo stat;
10686  CalcAllocationStatInfo(stat);
10687 
10688  PrintDetailedMap_Begin(
10689  json,
10690  stat.unusedBytes,
10691  stat.allocationCount,
10692  stat.unusedRangeCount);
10693 
10694  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10695 
10696  const VkDeviceSize unusableSize = GetUnusableSize();
10697  if(unusableSize > 0)
10698  {
10699  PrintDetailedMap_UnusedRange(json,
10700  m_UsableSize, // offset
10701  unusableSize); // size
10702  }
10703 
10704  PrintDetailedMap_End(json);
10705 }
10706 
10707 #endif // #if VMA_STATS_STRING_ENABLED
10708 
10709 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10710  uint32_t currentFrameIndex,
10711  uint32_t frameInUseCount,
10712  VkDeviceSize bufferImageGranularity,
10713  VkDeviceSize allocSize,
10714  VkDeviceSize allocAlignment,
10715  bool upperAddress,
10716  VmaSuballocationType allocType,
10717  bool canMakeOtherLost,
10718  uint32_t strategy,
10719  VmaAllocationRequest* pAllocationRequest)
10720 {
10721  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10722 
10723  // Simple way to respect bufferImageGranularity. May be optimized some day.
10724  // Whenever the allocation might contain an image with OPTIMAL tiling, inflate allocAlignment and allocSize to bufferImageGranularity.
10725  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10726  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10727  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10728  {
10729  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10730  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10731  }
10732 
10733  if(allocSize > m_UsableSize)
10734  {
10735  return false;
10736  }
10737 
10738  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10739  for(uint32_t level = targetLevel + 1; level--; )
10740  {
10741  for(Node* freeNode = m_FreeList[level].front;
10742  freeNode != VMA_NULL;
10743  freeNode = freeNode->free.next)
10744  {
10745  if(freeNode->offset % allocAlignment == 0)
10746  {
10747  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10748  pAllocationRequest->offset = freeNode->offset;
10749  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10750  pAllocationRequest->sumItemSize = 0;
10751  pAllocationRequest->itemsToMakeLostCount = 0;
10752  pAllocationRequest->customData = (void*)(uintptr_t)level;
10753  return true;
10754  }
10755  }
10756  }
10757 
10758  return false;
10759 }
10760 
10761 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10762  uint32_t currentFrameIndex,
10763  uint32_t frameInUseCount,
10764  VmaAllocationRequest* pAllocationRequest)
10765 {
10766  /*
10767  Lost allocations are not supported in buddy allocator at the moment.
10768  Support might be added in the future.
10769  */
10770  return pAllocationRequest->itemsToMakeLostCount == 0;
10771 }
10772 
10773 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10774 {
10775  /*
10776  Lost allocations are not supported in buddy allocator at the moment.
10777  Support might be added in the future.
10778  */
10779  return 0;
10780 }
10781 
10782 void VmaBlockMetadata_Buddy::Alloc(
10783  const VmaAllocationRequest& request,
10784  VmaSuballocationType type,
10785  VkDeviceSize allocSize,
10786  VmaAllocation hAllocation)
10787 {
10788  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10789 
10790  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10791  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10792 
10793  Node* currNode = m_FreeList[currLevel].front;
10794  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10795  while(currNode->offset != request.offset)
10796  {
10797  currNode = currNode->free.next;
10798  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10799  }
10800 
10801  // Go down, splitting free nodes.
10802  while(currLevel < targetLevel)
10803  {
10804  // currNode is already first free node at currLevel.
10805  // Remove it from list of free nodes at this currLevel.
10806  RemoveFromFreeList(currLevel, currNode);
10807 
10808  const uint32_t childrenLevel = currLevel + 1;
10809 
10810  // Create two free sub-nodes.
10811  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10812  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10813 
10814  leftChild->offset = currNode->offset;
10815  leftChild->type = Node::TYPE_FREE;
10816  leftChild->parent = currNode;
10817  leftChild->buddy = rightChild;
10818 
10819  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10820  rightChild->type = Node::TYPE_FREE;
10821  rightChild->parent = currNode;
10822  rightChild->buddy = leftChild;
10823 
10824  // Convert current currNode to split type.
10825  currNode->type = Node::TYPE_SPLIT;
10826  currNode->split.leftChild = leftChild;
10827 
10828  // Add child nodes to free list. Order is important!
10829  AddToFreeListFront(childrenLevel, rightChild);
10830  AddToFreeListFront(childrenLevel, leftChild);
10831 
10832  ++m_FreeCount;
10833  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10834  ++currLevel;
10835  currNode = m_FreeList[currLevel].front;
10836 
10837  /*
10838  We can be sure that currNode, as the left child of the node previously split,
10839  also fulfills the alignment requirement.
10840  */
10841  }
10842 
10843  // Remove from free list.
10844  VMA_ASSERT(currLevel == targetLevel &&
10845  currNode != VMA_NULL &&
10846  currNode->type == Node::TYPE_FREE);
10847  RemoveFromFreeList(currLevel, currNode);
10848 
10849  // Convert to allocation node.
10850  currNode->type = Node::TYPE_ALLOCATION;
10851  currNode->allocation.alloc = hAllocation;
10852 
10853  ++m_AllocationCount;
10854  --m_FreeCount;
10855  m_SumFreeSize -= allocSize;
10856 }
10857 
10858 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10859 {
10860  if(node->type == Node::TYPE_SPLIT)
10861  {
10862  DeleteNode(node->split.leftChild->buddy);
10863  DeleteNode(node->split.leftChild);
10864  }
10865 
10866  vma_delete(GetAllocationCallbacks(), node);
10867 }
10868 
10869 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10870 {
10871  VMA_VALIDATE(level < m_LevelCount);
10872  VMA_VALIDATE(curr->parent == parent);
10873  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10874  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10875  switch(curr->type)
10876  {
10877  case Node::TYPE_FREE:
10878  // curr->free.prev, next are validated separately.
10879  ctx.calculatedSumFreeSize += levelNodeSize;
10880  ++ctx.calculatedFreeCount;
10881  break;
10882  case Node::TYPE_ALLOCATION:
10883  ++ctx.calculatedAllocationCount;
10884  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10885  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10886  break;
10887  case Node::TYPE_SPLIT:
10888  {
10889  const uint32_t childrenLevel = level + 1;
10890  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10891  const Node* const leftChild = curr->split.leftChild;
10892  VMA_VALIDATE(leftChild != VMA_NULL);
10893  VMA_VALIDATE(leftChild->offset == curr->offset);
10894  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10895  {
10896  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10897  }
10898  const Node* const rightChild = leftChild->buddy;
10899  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10900  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10901  {
10902  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10903  }
10904  }
10905  break;
10906  default:
10907  return false;
10908  }
10909 
10910  return true;
10911 }
10912 
10913 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10914 {
10915  // This could be optimized, e.g. by using std::bit_width (formerly proposed as std::log2p1) from C++20.
10916  uint32_t level = 0;
10917  VkDeviceSize currLevelNodeSize = m_UsableSize;
10918  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10919  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10920  {
10921  ++level;
10922  currLevelNodeSize = nextLevelNodeSize;
10923  nextLevelNodeSize = currLevelNodeSize >> 1;
10924  }
10925  return level;
10926 }
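
// Worked example (added for clarity), assuming m_UsableSize = 256 MiB:
// level 0 nodes are 256 MiB, level 1 are 128 MiB, level 2 are 64 MiB, and so on.
// AllocSizeToLevel(100 MiB) returns 1 (100 MiB > 64 MiB, so a 128 MiB node is needed);
// AllocSizeToLevel(64 MiB) returns 2, because 64 MiB still fits a 64 MiB node exactly.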
10927 
10928 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10929 {
10930  // Find node and level.
10931  Node* node = m_Root;
10932  VkDeviceSize nodeOffset = 0;
10933  uint32_t level = 0;
10934  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10935  while(node->type == Node::TYPE_SPLIT)
10936  {
10937  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10938  if(offset < nodeOffset + nextLevelSize)
10939  {
10940  node = node->split.leftChild;
10941  }
10942  else
10943  {
10944  node = node->split.leftChild->buddy;
10945  nodeOffset += nextLevelSize;
10946  }
10947  ++level;
10948  levelNodeSize = nextLevelSize;
10949  }
10950 
10951  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10952  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10953 
10954  ++m_FreeCount;
10955  --m_AllocationCount;
10956  m_SumFreeSize += alloc->GetSize();
10957 
10958  node->type = Node::TYPE_FREE;
10959 
10960  // Join free nodes if possible.
10961  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10962  {
10963  RemoveFromFreeList(level, node->buddy);
10964  Node* const parent = node->parent;
10965 
10966  vma_delete(GetAllocationCallbacks(), node->buddy);
10967  vma_delete(GetAllocationCallbacks(), node);
10968  parent->type = Node::TYPE_FREE;
10969 
10970  node = parent;
10971  --level;
10972  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10973  --m_FreeCount;
10974  }
10975 
10976  AddToFreeListFront(level, node);
10977 }
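
// Note (added for clarity): the join loop above is the classic buddy merge.
// While the freed node's buddy is also free, both are deleted and replaced by
// their parent one level up, so a fully freed block eventually collapses back
// into the single free root node created in Init().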
10978 
10979 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10980 {
10981  switch(node->type)
10982  {
10983  case Node::TYPE_FREE:
10984  ++outInfo.unusedRangeCount;
10985  outInfo.unusedBytes += levelNodeSize;
10986  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10987  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10988  break;
10989  case Node::TYPE_ALLOCATION:
10990  {
10991  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10992  ++outInfo.allocationCount;
10993  outInfo.usedBytes += allocSize;
10994  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10995  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10996 
10997  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10998  if(unusedRangeSize > 0)
10999  {
11000  ++outInfo.unusedRangeCount;
11001  outInfo.unusedBytes += unusedRangeSize;
11002  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11003  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11004  }
11005  }
11006  break;
11007  case Node::TYPE_SPLIT:
11008  {
11009  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11010  const Node* const leftChild = node->split.leftChild;
11011  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11012  const Node* const rightChild = leftChild->buddy;
11013  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11014  }
11015  break;
11016  default:
11017  VMA_ASSERT(0);
11018  }
11019 }
11020 
11021 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11022 {
11023  VMA_ASSERT(node->type == Node::TYPE_FREE);
11024 
11025  // List is empty.
11026  Node* const frontNode = m_FreeList[level].front;
11027  if(frontNode == VMA_NULL)
11028  {
11029  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11030  node->free.prev = node->free.next = VMA_NULL;
11031  m_FreeList[level].front = m_FreeList[level].back = node;
11032  }
11033  else
11034  {
11035  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11036  node->free.prev = VMA_NULL;
11037  node->free.next = frontNode;
11038  frontNode->free.prev = node;
11039  m_FreeList[level].front = node;
11040  }
11041 }
11042 
11043 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11044 {
11045  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11046 
11047  // It is at the front.
11048  if(node->free.prev == VMA_NULL)
11049  {
11050  VMA_ASSERT(m_FreeList[level].front == node);
11051  m_FreeList[level].front = node->free.next;
11052  }
11053  else
11054  {
11055  Node* const prevFreeNode = node->free.prev;
11056  VMA_ASSERT(prevFreeNode->free.next == node);
11057  prevFreeNode->free.next = node->free.next;
11058  }
11059 
11060  // It is at the back.
11061  if(node->free.next == VMA_NULL)
11062  {
11063  VMA_ASSERT(m_FreeList[level].back == node);
11064  m_FreeList[level].back = node->free.prev;
11065  }
11066  else
11067  {
11068  Node* const nextFreeNode = node->free.next;
11069  VMA_ASSERT(nextFreeNode->free.prev == node);
11070  nextFreeNode->free.prev = node->free.prev;
11071  }
11072 }
11073 
11074 #if VMA_STATS_STRING_ENABLED
11075 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11076 {
11077  switch(node->type)
11078  {
11079  case Node::TYPE_FREE:
11080  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11081  break;
11082  case Node::TYPE_ALLOCATION:
11083  {
11084  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11085  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11086  if(allocSize < levelNodeSize)
11087  {
11088  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11089  }
11090  }
11091  break;
11092  case Node::TYPE_SPLIT:
11093  {
11094  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11095  const Node* const leftChild = node->split.leftChild;
11096  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11097  const Node* const rightChild = leftChild->buddy;
11098  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11099  }
11100  break;
11101  default:
11102  VMA_ASSERT(0);
11103  }
11104 }
11105 #endif // #if VMA_STATS_STRING_ENABLED
11106 
11107 
11108 ////////////////////////////////////////////////////////////////////////////////
11109 // class VmaDeviceMemoryBlock
11110 
11111 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11112  m_pMetadata(VMA_NULL),
11113  m_MemoryTypeIndex(UINT32_MAX),
11114  m_Id(0),
11115  m_hMemory(VK_NULL_HANDLE),
11116  m_MapCount(0),
11117  m_pMappedData(VMA_NULL)
11118 {
11119 }
11120 
11121 void VmaDeviceMemoryBlock::Init(
11122  VmaAllocator hAllocator,
11123  VmaPool hParentPool,
11124  uint32_t newMemoryTypeIndex,
11125  VkDeviceMemory newMemory,
11126  VkDeviceSize newSize,
11127  uint32_t id,
11128  uint32_t algorithm)
11129 {
11130  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11131 
11132  m_hParentPool = hParentPool;
11133  m_MemoryTypeIndex = newMemoryTypeIndex;
11134  m_Id = id;
11135  m_hMemory = newMemory;
11136 
11137  switch(algorithm)
11138  {
11139  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11140  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11141  break;
11142  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11143  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11144  break;
11145  default:
11146  VMA_ASSERT(0);
11147  // Fall-through.
11148  case 0:
11149  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11150  }
11151  m_pMetadata->Init(newSize);
11152 }
11153 
11154 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11155 {
11156  // This is the most important assert in the entire library.
11157  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11158  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11159 
11160  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11161  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11162  m_hMemory = VK_NULL_HANDLE;
11163 
11164  vma_delete(allocator, m_pMetadata);
11165  m_pMetadata = VMA_NULL;
11166 }
11167 
11168 bool VmaDeviceMemoryBlock::Validate() const
11169 {
11170  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11171  (m_pMetadata->GetSize() != 0));
11172 
11173  return m_pMetadata->Validate();
11174 }
11175 
11176 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11177 {
11178  void* pData = nullptr;
11179  VkResult res = Map(hAllocator, 1, &pData);
11180  if(res != VK_SUCCESS)
11181  {
11182  return res;
11183  }
11184 
11185  res = m_pMetadata->CheckCorruption(pData);
11186 
11187  Unmap(hAllocator, 1);
11188 
11189  return res;
11190 }
11191 
11192 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11193 {
11194  if(count == 0)
11195  {
11196  return VK_SUCCESS;
11197  }
11198 
11199  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11200  if(m_MapCount != 0)
11201  {
11202  m_MapCount += count;
11203  VMA_ASSERT(m_pMappedData != VMA_NULL);
11204  if(ppData != VMA_NULL)
11205  {
11206  *ppData = m_pMappedData;
11207  }
11208  return VK_SUCCESS;
11209  }
11210  else
11211  {
11212  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11213  hAllocator->m_hDevice,
11214  m_hMemory,
11215  0, // offset
11216  VK_WHOLE_SIZE,
11217  0, // flags
11218  &m_pMappedData);
11219  if(result == VK_SUCCESS)
11220  {
11221  if(ppData != VMA_NULL)
11222  {
11223  *ppData = m_pMappedData;
11224  }
11225  m_MapCount = count;
11226  }
11227  return result;
11228  }
11229 }
11230 
11231 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11232 {
11233  if(count == 0)
11234  {
11235  return;
11236  }
11237 
11238  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11239  if(m_MapCount >= count)
11240  {
11241  m_MapCount -= count;
11242  if(m_MapCount == 0)
11243  {
11244  m_pMappedData = VMA_NULL;
11245  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11246  }
11247  }
11248  else
11249  {
11250  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11251  }
11252 }
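
// Illustrative sketch (not part of the library): the reference counting in
// Map()/Unmap() above is what makes nested mapping legal. Assumes `alloc`
// resides in HOST_VISIBLE memory; each vmaMapMemory() must be balanced by
// exactly one vmaUnmapMemory().
static void ExampleNestedMap(VmaAllocator allocator, VmaAllocation alloc)
{
    void* pData1 = VMA_NULL;
    void* pData2 = VMA_NULL;
    vmaMapMemory(allocator, alloc, &pData1); // m_MapCount 0 -> 1: calls vkMapMemory.
    vmaMapMemory(allocator, alloc, &pData2); // m_MapCount 1 -> 2: reuses m_pMappedData.
    // pData1 == pData2 here: both point at the same mapping of the block.
    vmaUnmapMemory(allocator, alloc);        // m_MapCount 2 -> 1.
    vmaUnmapMemory(allocator, alloc);        // m_MapCount 1 -> 0: calls vkUnmapMemory.
}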
11253 
11254 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11255 {
11256  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11257  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11258 
11259  void* pData;
11260  VkResult res = Map(hAllocator, 1, &pData);
11261  if(res != VK_SUCCESS)
11262  {
11263  return res;
11264  }
11265 
11266  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11267  VmaWriteMagicValue(pData, allocOffset + allocSize);
11268 
11269  Unmap(hAllocator, 1);
11270 
11271  return VK_SUCCESS;
11272 }
11273 
11274 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11275 {
11276  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11277  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11278 
11279  void* pData;
11280  VkResult res = Map(hAllocator, 1, &pData);
11281  if(res != VK_SUCCESS)
11282  {
11283  return res;
11284  }
11285 
11286  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11287  {
11288  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11289  }
11290  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11291  {
11292  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11293  }
11294 
11295  Unmap(hAllocator, 1);
11296 
11297  return VK_SUCCESS;
11298 }
11299 
11300 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11301  const VmaAllocator hAllocator,
11302  const VmaAllocation hAllocation,
11303  VkBuffer hBuffer)
11304 {
11305  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11306  hAllocation->GetBlock() == this);
11307  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11308  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11309  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11310  hAllocator->m_hDevice,
11311  hBuffer,
11312  m_hMemory,
11313  hAllocation->GetOffset());
11314 }
11315 
11316 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11317  const VmaAllocator hAllocator,
11318  const VmaAllocation hAllocation,
11319  VkImage hImage)
11320 {
11321  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11322  hAllocation->GetBlock() == this);
11323  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11324  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11325  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11326  hAllocator->m_hDevice,
11327  hImage,
11328  m_hMemory,
11329  hAllocation->GetOffset());
11330 }
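
// Illustrative sketch (not part of the library): BindBufferMemory()/BindImageMemory()
// above are reached when memory is allocated separately from the resource.
// Error handling omitted for brevity.
static void ExampleAllocateAndBind(VmaAllocator allocator, VkBuffer buf)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc = VK_NULL_HANDLE;
    vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
    // Internally takes the block's mutex, so concurrent vkBind*/vkMapMemory calls
    // on the same VkDeviceMemory from other threads are serialized.
    vmaBindBufferMemory(allocator, alloc, buf);
}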
11331 
11332 static void InitStatInfo(VmaStatInfo& outInfo)
11333 {
11334  memset(&outInfo, 0, sizeof(outInfo));
11335  outInfo.allocationSizeMin = UINT64_MAX;
11336  outInfo.unusedRangeSizeMin = UINT64_MAX;
11337 }
11338 
11339 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11340 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11341 {
11342  inoutInfo.blockCount += srcInfo.blockCount;
11343  inoutInfo.allocationCount += srcInfo.allocationCount;
11344  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11345  inoutInfo.usedBytes += srcInfo.usedBytes;
11346  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11347  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11348  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11349  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11350  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11351 }
11352 
11353 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11354 {
11355  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11356  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11357  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11358  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11359 }
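
// Note (added for clarity): VmaRoundDiv() performs integer division rounded to
// nearest, i.e. VmaRoundDiv(x, y) == (x + y / 2) / y, so the averages above
// round instead of truncating: 10 bytes over 4 allocations yields 3, not 2.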
11360 
11361 VmaPool_T::VmaPool_T(
11362  VmaAllocator hAllocator,
11363  const VmaPoolCreateInfo& createInfo,
11364  VkDeviceSize preferredBlockSize) :
11365  m_BlockVector(
11366  hAllocator,
11367  this, // hParentPool
11368  createInfo.memoryTypeIndex,
11369  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11370  createInfo.minBlockCount,
11371  createInfo.maxBlockCount,
11372  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11373  createInfo.frameInUseCount,
11374  true, // isCustomPool
11375  createInfo.blockSize != 0, // explicitBlockSize
11376  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11377  m_Id(0)
11378 {
11379 }
11380 
11381 VmaPool_T::~VmaPool_T()
11382 {
11383 }
11384 
11385 #if VMA_STATS_STRING_ENABLED
11386 
11387 #endif // #if VMA_STATS_STRING_ENABLED
11388 
11389 VmaBlockVector::VmaBlockVector(
11390  VmaAllocator hAllocator,
11391  VmaPool hParentPool,
11392  uint32_t memoryTypeIndex,
11393  VkDeviceSize preferredBlockSize,
11394  size_t minBlockCount,
11395  size_t maxBlockCount,
11396  VkDeviceSize bufferImageGranularity,
11397  uint32_t frameInUseCount,
11398  bool isCustomPool,
11399  bool explicitBlockSize,
11400  uint32_t algorithm) :
11401  m_hAllocator(hAllocator),
11402  m_hParentPool(hParentPool),
11403  m_MemoryTypeIndex(memoryTypeIndex),
11404  m_PreferredBlockSize(preferredBlockSize),
11405  m_MinBlockCount(minBlockCount),
11406  m_MaxBlockCount(maxBlockCount),
11407  m_BufferImageGranularity(bufferImageGranularity),
11408  m_FrameInUseCount(frameInUseCount),
11409  m_IsCustomPool(isCustomPool),
11410  m_ExplicitBlockSize(explicitBlockSize),
11411  m_Algorithm(algorithm),
11412  m_HasEmptyBlock(false),
11413  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11414  m_NextBlockId(0)
11415 {
11416 }
11417 
11418 VmaBlockVector::~VmaBlockVector()
11419 {
11420  for(size_t i = m_Blocks.size(); i--; )
11421  {
11422  m_Blocks[i]->Destroy(m_hAllocator);
11423  vma_delete(m_hAllocator, m_Blocks[i]);
11424  }
11425 }
11426 
11427 VkResult VmaBlockVector::CreateMinBlocks()
11428 {
11429  for(size_t i = 0; i < m_MinBlockCount; ++i)
11430  {
11431  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11432  if(res != VK_SUCCESS)
11433  {
11434  return res;
11435  }
11436  }
11437  return VK_SUCCESS;
11438 }
11439 
11440 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11441 {
11442  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11443 
11444  const size_t blockCount = m_Blocks.size();
11445 
11446  pStats->size = 0;
11447  pStats->unusedSize = 0;
11448  pStats->allocationCount = 0;
11449  pStats->unusedRangeCount = 0;
11450  pStats->unusedRangeSizeMax = 0;
11451  pStats->blockCount = blockCount;
11452 
11453  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11454  {
11455  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11456  VMA_ASSERT(pBlock);
11457  VMA_HEAVY_ASSERT(pBlock->Validate());
11458  pBlock->m_pMetadata->AddPoolStats(*pStats);
11459  }
11460 }
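
// Illustrative sketch (not part of the library): retrieving the statistics
// gathered above for a custom pool through the public API.
static void ExampleLogPoolStats(VmaAllocator allocator, VmaPool pool)
{
    VmaPoolStats stats = {};
    vmaGetPoolStats(allocator, pool, &stats);
    VMA_DEBUG_LOG("Pool: %llu bytes total, %llu bytes unused in %llu free ranges",
        (unsigned long long)stats.size,
        (unsigned long long)stats.unusedSize,
        (unsigned long long)stats.unusedRangeCount);
}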
11461 
11462 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11463 {
11464  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11465  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11466  (VMA_DEBUG_MARGIN > 0) &&
11467  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11468  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11469 }
11470 
11471 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11472 
11473 VkResult VmaBlockVector::Allocate(
11474  uint32_t currentFrameIndex,
11475  VkDeviceSize size,
11476  VkDeviceSize alignment,
11477  const VmaAllocationCreateInfo& createInfo,
11478  VmaSuballocationType suballocType,
11479  size_t allocationCount,
11480  VmaAllocation* pAllocations)
11481 {
11482  size_t allocIndex;
11483  VkResult res = VK_SUCCESS;
11484 
11485  if(IsCorruptionDetectionEnabled())
11486  {
11487  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11488  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11489  }
11490 
11491  {
11492  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11493  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11494  {
11495  res = AllocatePage(
11496  currentFrameIndex,
11497  size,
11498  alignment,
11499  createInfo,
11500  suballocType,
11501  pAllocations + allocIndex);
11502  if(res != VK_SUCCESS)
11503  {
11504  break;
11505  }
11506  }
11507  }
11508 
11509  if(res != VK_SUCCESS)
11510  {
11511  // Free all already created allocations.
11512  while(allocIndex--)
11513  {
11514  Free(pAllocations[allocIndex]);
11515  }
11516  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11517  }
11518 
11519  return res;
11520 }
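
// Illustrative sketch (not part of the library): the multi-allocation loop above,
// with its rollback on partial failure, backs the public vmaAllocateMemoryPages()
// entry point. The VkMemoryRequirements values are placeholders.
static void ExampleAllocatePages(VmaAllocator allocator)
{
    VkMemoryRequirements memReq = {};
    memReq.size = 1024;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    VmaAllocation allocs[8] = {};
    // Either all 8 allocations succeed, or every partial allocation is freed
    // and the whole call fails.
    vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 8, allocs, VMA_NULL);
}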
11521 
11522 VkResult VmaBlockVector::AllocatePage(
11523  uint32_t currentFrameIndex,
11524  VkDeviceSize size,
11525  VkDeviceSize alignment,
11526  const VmaAllocationCreateInfo& createInfo,
11527  VmaSuballocationType suballocType,
11528  VmaAllocation* pAllocation)
11529 {
11530  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11531  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11532  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11533  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11534  const bool canCreateNewBlock =
11535  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11536  (m_Blocks.size() < m_MaxBlockCount);
11537  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11538 
11539  // If linear algorithm is used, canMakeOtherLost is available only when the block is used as ring buffer,
11540  // which in turn is available only when maxBlockCount = 1.
11541  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11542  {
11543  canMakeOtherLost = false;
11544  }
11545 
11546  // Upper address can only be used with linear allocator and within single memory block.
11547  if(isUpperAddress &&
11548  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11549  {
11550  return VK_ERROR_FEATURE_NOT_PRESENT;
11551  }
11552 
11553  // Validate strategy.
11554  switch(strategy)
11555  {
11556  case 0:
11557  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11558  break;
11559  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11560  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11561  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11562  break;
11563  default:
11564  return VK_ERROR_FEATURE_NOT_PRESENT;
11565  }
11566 
11567  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11568  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11569  {
11570  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11571  }
11572 
11573  /*
11574  Under certain conditions, this whole section can be skipped for optimization, so
11575  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11576  e.g. for custom pools with linear algorithm.
11577  */
11578  if(!canMakeOtherLost || canCreateNewBlock)
11579  {
11580  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11581  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11582  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11583 
11584  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11585  {
11586  // Use only last block.
11587  if(!m_Blocks.empty())
11588  {
11589  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11590  VMA_ASSERT(pCurrBlock);
11591  VkResult res = AllocateFromBlock(
11592  pCurrBlock,
11593  currentFrameIndex,
11594  size,
11595  alignment,
11596  allocFlagsCopy,
11597  createInfo.pUserData,
11598  suballocType,
11599  strategy,
11600  pAllocation);
11601  if(res == VK_SUCCESS)
11602  {
11603  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11604  return VK_SUCCESS;
11605  }
11606  }
11607  }
11608  else
11609  {
11610  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11611  {
11612  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11613  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11614  {
11615  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11616  VMA_ASSERT(pCurrBlock);
11617  VkResult res = AllocateFromBlock(
11618  pCurrBlock,
11619  currentFrameIndex,
11620  size,
11621  alignment,
11622  allocFlagsCopy,
11623  createInfo.pUserData,
11624  suballocType,
11625  strategy,
11626  pAllocation);
11627  if(res == VK_SUCCESS)
11628  {
11629  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11630  return VK_SUCCESS;
11631  }
11632  }
11633  }
11634  else // WORST_FIT, FIRST_FIT
11635  {
11636  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11637  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11638  {
11639  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11640  VMA_ASSERT(pCurrBlock);
11641  VkResult res = AllocateFromBlock(
11642  pCurrBlock,
11643  currentFrameIndex,
11644  size,
11645  alignment,
11646  allocFlagsCopy,
11647  createInfo.pUserData,
11648  suballocType,
11649  strategy,
11650  pAllocation);
11651  if(res == VK_SUCCESS)
11652  {
11653  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11654  return VK_SUCCESS;
11655  }
11656  }
11657  }
11658  }
11659 
11660  // 2. Try to create new block.
11661  if(canCreateNewBlock)
11662  {
11663  // Calculate optimal size for new block.
11664  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11665  uint32_t newBlockSizeShift = 0;
11666  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11667 
11668  if(!m_ExplicitBlockSize)
11669  {
11670  // Allocate 1/8, 1/4, 1/2 as first blocks.
11671  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11672  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11673  {
11674  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11675  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11676  {
11677  newBlockSize = smallerNewBlockSize;
11678  ++newBlockSizeShift;
11679  }
11680  else
11681  {
11682  break;
11683  }
11684  }
11685  }
11686 
11687  size_t newBlockIndex = 0;
11688  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11689  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11690  if(!m_ExplicitBlockSize)
11691  {
11692  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11693  {
11694  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11695  if(smallerNewBlockSize >= size)
11696  {
11697  newBlockSize = smallerNewBlockSize;
11698  ++newBlockSizeShift;
11699  res = CreateBlock(newBlockSize, &newBlockIndex);
11700  }
11701  else
11702  {
11703  break;
11704  }
11705  }
11706  }
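 // A worked example of the sizing heuristic, assuming m_PreferredBlockSize of
 // 256 MiB (hypothetical), no existing blocks, and a 100 MiB request: in the
 // first loop, halving to 128 MiB fails the "smallerNewBlockSize >= size * 2"
 // test, so the block is created at the full 256 MiB. If that vkAllocateMemory
 // call fails, the retry loop just above attempts 128 MiB (still >= the
 // request) before giving up. Note that NEW_BLOCK_SIZE_SHIFT_MAX is a shift
 // budget shared between both loops.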
11707 
11708  if(res == VK_SUCCESS)
11709  {
11710  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11711  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11712 
11713  res = AllocateFromBlock(
11714  pBlock,
11715  currentFrameIndex,
11716  size,
11717  alignment,
11718  allocFlagsCopy,
11719  createInfo.pUserData,
11720  suballocType,
11721  strategy,
11722  pAllocation);
11723  if(res == VK_SUCCESS)
11724  {
11725  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11726  return VK_SUCCESS;
11727  }
11728  else
11729  {
11730  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11731  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11732  }
11733  }
11734  }
11735  }
11736 
11737  // 3. Try to allocate from existing blocks with making other allocations lost.
11738  if(canMakeOtherLost)
11739  {
11740  uint32_t tryIndex = 0;
11741  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11742  {
11743  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11744  VmaAllocationRequest bestRequest = {};
11745  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11746 
11747  // 1. Search existing allocations.
11748  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11749  {
11750  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11751  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11752  {
11753  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11754  VMA_ASSERT(pCurrBlock);
11755  VmaAllocationRequest currRequest = {};
11756  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11757  currentFrameIndex,
11758  m_FrameInUseCount,
11759  m_BufferImageGranularity,
11760  size,
11761  alignment,
11762  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11763  suballocType,
11764  canMakeOtherLost,
11765  strategy,
11766  &currRequest))
11767  {
11768  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11769  if(pBestRequestBlock == VMA_NULL ||
11770  currRequestCost < bestRequestCost)
11771  {
11772  pBestRequestBlock = pCurrBlock;
11773  bestRequest = currRequest;
11774  bestRequestCost = currRequestCost;
11775 
11776  if(bestRequestCost == 0)
11777  {
11778  break;
11779  }
11780  }
11781  }
11782  }
11783  }
11784  else // WORST_FIT, FIRST_FIT
11785  {
11786  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11787  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11788  {
11789  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11790  VMA_ASSERT(pCurrBlock);
11791  VmaAllocationRequest currRequest = {};
11792  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11793  currentFrameIndex,
11794  m_FrameInUseCount,
11795  m_BufferImageGranularity,
11796  size,
11797  alignment,
11798  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11799  suballocType,
11800  canMakeOtherLost,
11801  strategy,
11802  &currRequest))
11803  {
11804  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11805  if(pBestRequestBlock == VMA_NULL ||
11806  currRequestCost < bestRequestCost ||
11807  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11808  {
11809  pBestRequestBlock = pCurrBlock;
11810  bestRequest = currRequest;
11811  bestRequestCost = currRequestCost;
11812 
11813  if(bestRequestCost == 0 ||
11814  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11815  {
11816  break;
11817  }
11818  }
11819  }
11820  }
11821  }
11822 
11823  if(pBestRequestBlock != VMA_NULL)
11824  {
11825  if(mapped)
11826  {
11827  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11828  if(res != VK_SUCCESS)
11829  {
11830  return res;
11831  }
11832  }
11833 
11834  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11835  currentFrameIndex,
11836  m_FrameInUseCount,
11837  &bestRequest))
11838  {
11839  // We no longer have an empty block.
11840  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11841  {
11842  m_HasEmptyBlock = false;
11843  }
11844  // Allocate from this pBlock.
11845  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11846  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11847  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11848  (*pAllocation)->InitBlockAllocation(
11849  pBestRequestBlock,
11850  bestRequest.offset,
11851  alignment,
11852  size,
11853  suballocType,
11854  mapped,
11855  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11856  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11857  VMA_DEBUG_LOG(" Returned from existing block");
11858  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11859  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11860  {
11861  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11862  }
11863  if(IsCorruptionDetectionEnabled())
11864  {
11865  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11866  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11867  }
11868  return VK_SUCCESS;
11869  }
11870  // else: Some allocations must have been touched while we were here. Next try.
11871  }
11872  else
11873  {
11874  // Could not find place in any of the blocks - break outer loop.
11875  break;
11876  }
11877  }
11878  /* Maximum number of tries exceeded - a very unlikely event that happens when
11879  many other threads are simultaneously touching allocations, making it impossible
11880  to mark them as lost at the same time as we try to allocate. */
11881  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11882  {
11883  return VK_ERROR_TOO_MANY_OBJECTS;
11884  }
11885  }
11886 
11887  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11888 }
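/*
A minimal sketch of how the strategies handled above are requested through the
public API (assumes an initialized VmaAllocator `allocator`; the buffer
parameters are hypothetical):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Prefer the lowest memory waste over allocation call speed.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);
*/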
11889 
11890 void VmaBlockVector::Free(
11891  VmaAllocation hAllocation)
11892 {
11893  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11894 
11895  // Scope for lock.
11896  {
11897  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11898 
11899  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11900 
11901  if(IsCorruptionDetectionEnabled())
11902  {
11903  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11904  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11905  }
11906 
11907  if(hAllocation->IsPersistentMap())
11908  {
11909  pBlock->Unmap(m_hAllocator, 1);
11910  }
11911 
11912  pBlock->m_pMetadata->Free(hAllocation);
11913  VMA_HEAVY_ASSERT(pBlock->Validate());
11914 
11915  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11916 
11917  // pBlock became empty after this deallocation.
11918  if(pBlock->m_pMetadata->IsEmpty())
11919  {
11920  // We already have an empty block. We don't want two, so delete this one.
11921  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11922  {
11923  pBlockToDelete = pBlock;
11924  Remove(pBlock);
11925  }
11926  // We now have our first empty block.
11927  else
11928  {
11929  m_HasEmptyBlock = true;
11930  }
11931  }
11932  // pBlock didn't become empty, but we have another empty block - find and free that one.
11933  // (This is optional, heuristics.)
11934  else if(m_HasEmptyBlock)
11935  {
11936  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11937  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11938  {
11939  pBlockToDelete = pLastBlock;
11940  m_Blocks.pop_back();
11941  m_HasEmptyBlock = false;
11942  }
11943  }
11944 
11945  IncrementallySortBlocks();
11946  }
11947 
11948  // Destruction of a free block. Deferred until this point, outside of the mutex
11949  // lock, for performance reasons.
11950  if(pBlockToDelete != VMA_NULL)
11951  {
11952  VMA_DEBUG_LOG(" Deleted empty allocation");
11953  pBlockToDelete->Destroy(m_hAllocator);
11954  vma_delete(m_hAllocator, pBlockToDelete);
11955  }
11956 }
11957 
11958 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11959 {
11960  VkDeviceSize result = 0;
11961  for(size_t i = m_Blocks.size(); i--; )
11962  {
11963  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11964  if(result >= m_PreferredBlockSize)
11965  {
11966  break;
11967  }
11968  }
11969  return result;
11970 }
11971 
11972 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11973 {
11974  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11975  {
11976  if(m_Blocks[blockIndex] == pBlock)
11977  {
11978  VmaVectorRemove(m_Blocks, blockIndex);
11979  return;
11980  }
11981  }
11982  VMA_ASSERT(0);
11983 }
11984 
11985 void VmaBlockVector::IncrementallySortBlocks()
11986 {
11987  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11988  {
11989  // Bubble sort only until first swap.
11990  for(size_t i = 1; i < m_Blocks.size(); ++i)
11991  {
11992  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11993  {
11994  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11995  return;
11996  }
11997  }
11998  }
11999 }
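/*
A standalone sketch of the same strategy (a plain vector of free sizes standing
in for the blocks): at most one swap per call keeps the cost at O(n), while
repeated calls converge toward ascending order.

    #include <utility>
    #include <vector>

    void IncrementallySort(std::vector<size_t>& freeSizes)
    {
        for(size_t i = 1; i < freeSizes.size(); ++i)
        {
            if(freeSizes[i - 1] > freeSizes[i])
            {
                std::swap(freeSizes[i - 1], freeSizes[i]); // Fix the first out-of-order pair...
                return; // ...then stop until the next call.
            }
        }
    }
*/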
12000 
12001 VkResult VmaBlockVector::AllocateFromBlock(
12002  VmaDeviceMemoryBlock* pBlock,
12003  uint32_t currentFrameIndex,
12004  VkDeviceSize size,
12005  VkDeviceSize alignment,
12006  VmaAllocationCreateFlags allocFlags,
12007  void* pUserData,
12008  VmaSuballocationType suballocType,
12009  uint32_t strategy,
12010  VmaAllocation* pAllocation)
12011 {
12012  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12013  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12014  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12015  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12016 
12017  VmaAllocationRequest currRequest = {};
12018  if(pBlock->m_pMetadata->CreateAllocationRequest(
12019  currentFrameIndex,
12020  m_FrameInUseCount,
12021  m_BufferImageGranularity,
12022  size,
12023  alignment,
12024  isUpperAddress,
12025  suballocType,
12026  false, // canMakeOtherLost
12027  strategy,
12028  &currRequest))
12029  {
12030  // Allocate from pCurrBlock.
12031  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12032 
12033  if(mapped)
12034  {
12035  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12036  if(res != VK_SUCCESS)
12037  {
12038  return res;
12039  }
12040  }
12041 
12042  // We no longer have an empty block.
12043  if(pBlock->m_pMetadata->IsEmpty())
12044  {
12045  m_HasEmptyBlock = false;
12046  }
12047 
12048  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12049  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12050  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12051  (*pAllocation)->InitBlockAllocation(
12052  pBlock,
12053  currRequest.offset,
12054  alignment,
12055  size,
12056  suballocType,
12057  mapped,
12058  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12059  VMA_HEAVY_ASSERT(pBlock->Validate());
12060  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12061  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12062  {
12063  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12064  }
12065  if(IsCorruptionDetectionEnabled())
12066  {
12067  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12068  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12069  }
12070  return VK_SUCCESS;
12071  }
12072  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12073 }
12074 
12075 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12076 {
12077  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12078  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12079  allocInfo.allocationSize = blockSize;
12080  VkDeviceMemory mem = VK_NULL_HANDLE;
12081  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12082  if(res < 0)
12083  {
12084  return res;
12085  }
12086 
12087  // New VkDeviceMemory successfully created.
12088 
12089  // Create new Allocation for it.
12090  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12091  pBlock->Init(
12092  m_hAllocator,
12093  m_hParentPool,
12094  m_MemoryTypeIndex,
12095  mem,
12096  allocInfo.allocationSize,
12097  m_NextBlockId++,
12098  m_Algorithm);
12099 
12100  m_Blocks.push_back(pBlock);
12101  if(pNewBlockIndex != VMA_NULL)
12102  {
12103  *pNewBlockIndex = m_Blocks.size() - 1;
12104  }
12105 
12106  return VK_SUCCESS;
12107 }
12108 
12109 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12110  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12111  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12112 {
12113  const size_t blockCount = m_Blocks.size();
12114  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12115 
12116  enum BLOCK_FLAG
12117  {
12118  BLOCK_FLAG_USED = 0x00000001,
12119  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12120  };
12121 
12122  struct BlockInfo
12123  {
12124  uint32_t flags;
12125  void* pMappedData;
12126  };
12127  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12128  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12129  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12130 
12131  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12132  const size_t moveCount = moves.size();
12133  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12134  {
12135  const VmaDefragmentationMove& move = moves[moveIndex];
12136  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12137  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12138  }
12139 
12140  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12141 
12142  // Go over all blocks. Get mapped pointer or map if necessary.
12143  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12144  {
12145  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12146  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12147  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12148  {
12149  currBlockInfo.pMappedData = pBlock->GetMappedData();
12150  // It is not originally mapped - map it.
12151  if(currBlockInfo.pMappedData == VMA_NULL)
12152  {
12153  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12154  if(pDefragCtx->res == VK_SUCCESS)
12155  {
12156  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12157  }
12158  }
12159  }
12160  }
12161 
12162  // Go over all moves. Do actual data transfer.
12163  if(pDefragCtx->res == VK_SUCCESS)
12164  {
12165  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12166  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12167 
12168  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12169  {
12170  const VmaDefragmentationMove& move = moves[moveIndex];
12171 
12172  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12173  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12174 
12175  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12176 
12177  // Invalidate source.
12178  if(isNonCoherent)
12179  {
12180  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12181  memRange.memory = pSrcBlock->GetDeviceMemory();
12182  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12183  memRange.size = VMA_MIN(
12184  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12185  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12186  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12187  }
12188 
12189  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12190  memmove(
12191  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12192  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12193  static_cast<size_t>(move.size));
12194 
12195  if(IsCorruptionDetectionEnabled())
12196  {
12197  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12198  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12199  }
12200 
12201  // Flush destination.
12202  if(isNonCoherent)
12203  {
12204  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12205  memRange.memory = pDstBlock->GetDeviceMemory();
12206  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12207  memRange.size = VMA_MIN(
12208  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12209  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12210  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12211  }
12212  }
12213  }
12214 
12215  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12216  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
12217  for(size_t blockIndex = blockCount; blockIndex--; )
12218  {
12219  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12220  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12221  {
12222  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12223  pBlock->Unmap(m_hAllocator, 1);
12224  }
12225  }
12226 }
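 // A worked example of the range rounding above, assuming nonCoherentAtomSize
 // of 64 (hypothetical): for a move with srcOffset = 200 and size = 100,
 // memRange.offset = VmaAlignDown(200, 64) = 192 and
 // memRange.size = VmaAlignUp(100 + (200 - 192), 64) = VmaAlignUp(108, 64) = 128,
 // clamped so the range never extends past the end of the block. The flush of
 // the destination range is rounded the same way.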
12227 
12228 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12229  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12230  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12231  VkCommandBuffer commandBuffer)
12232 {
12233  const size_t blockCount = m_Blocks.size();
12234 
12235  pDefragCtx->blockContexts.resize(blockCount);
12236  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12237 
12238  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12239  const size_t moveCount = moves.size();
12240  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12241  {
12242  const VmaDefragmentationMove& move = moves[moveIndex];
12243  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12244  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12245  }
12246 
12247  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12248 
12249  // Go over all blocks. Create and bind buffer for whole block if necessary.
12250  {
12251  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12252  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12253  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12254 
12255  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12256  {
12257  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12258  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12259  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12260  {
12261  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12262  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12263  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12264  if(pDefragCtx->res == VK_SUCCESS)
12265  {
12266  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12267  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12268  }
12269  }
12270  }
12271  }
12272 
12273  // Go over all moves. Post data transfer commands to command buffer.
12274  if(pDefragCtx->res == VK_SUCCESS)
12275  {
12276  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12277  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12278 
12279  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12280  {
12281  const VmaDefragmentationMove& move = moves[moveIndex];
12282 
12283  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12284  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12285 
12286  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12287 
12288  VkBufferCopy region = {
12289  move.srcOffset,
12290  move.dstOffset,
12291  move.size };
12292  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12293  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12294  }
12295  }
12296 
12297  // Buffers were saved to pDefragCtx->blockContexts; mark the context as not ready so they are destroyed only after the copy commands have executed.
12298  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12299  {
12300  pDefragCtx->res = VK_NOT_READY;
12301  }
12302 }
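/*
A minimal sketch of driving this GPU path through the public API (assumes an
initialized VmaAllocator `allocator`, an array `allocations` of `allocCount`
VmaAllocation handles, and a VkCommandBuffer `cmdBuf` in the recording state;
error handling omitted):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;   // No byte limit.
    defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = cmdBuf;              // Non-null enables the GPU path.

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    // ... end and submit cmdBuf, then wait until the copies have executed ...
    vmaDefragmentationEnd(allocator, defragCtx);
*/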
12303 
12304 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12305 {
12306  m_HasEmptyBlock = false;
12307  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12308  {
12309  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12310  if(pBlock->m_pMetadata->IsEmpty())
12311  {
12312  if(m_Blocks.size() > m_MinBlockCount)
12313  {
12314  if(pDefragmentationStats != VMA_NULL)
12315  {
12316  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12317  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12318  }
12319 
12320  VmaVectorRemove(m_Blocks, blockIndex);
12321  pBlock->Destroy(m_hAllocator);
12322  vma_delete(m_hAllocator, pBlock);
12323  }
12324  else
12325  {
12326  m_HasEmptyBlock = true;
12327  }
12328  }
12329  }
12330 }
12331 
12332 #if VMA_STATS_STRING_ENABLED
12333 
12334 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12335 {
12336  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12337 
12338  json.BeginObject();
12339 
12340  if(m_IsCustomPool)
12341  {
12342  json.WriteString("MemoryTypeIndex");
12343  json.WriteNumber(m_MemoryTypeIndex);
12344 
12345  json.WriteString("BlockSize");
12346  json.WriteNumber(m_PreferredBlockSize);
12347 
12348  json.WriteString("BlockCount");
12349  json.BeginObject(true);
12350  if(m_MinBlockCount > 0)
12351  {
12352  json.WriteString("Min");
12353  json.WriteNumber((uint64_t)m_MinBlockCount);
12354  }
12355  if(m_MaxBlockCount < SIZE_MAX)
12356  {
12357  json.WriteString("Max");
12358  json.WriteNumber((uint64_t)m_MaxBlockCount);
12359  }
12360  json.WriteString("Cur");
12361  json.WriteNumber((uint64_t)m_Blocks.size());
12362  json.EndObject();
12363 
12364  if(m_FrameInUseCount > 0)
12365  {
12366  json.WriteString("FrameInUseCount");
12367  json.WriteNumber(m_FrameInUseCount);
12368  }
12369 
12370  if(m_Algorithm != 0)
12371  {
12372  json.WriteString("Algorithm");
12373  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12374  }
12375  }
12376  else
12377  {
12378  json.WriteString("PreferredBlockSize");
12379  json.WriteNumber(m_PreferredBlockSize);
12380  }
12381 
12382  json.WriteString("Blocks");
12383  json.BeginObject();
12384  for(size_t i = 0; i < m_Blocks.size(); ++i)
12385  {
12386  json.BeginString();
12387  json.ContinueString(m_Blocks[i]->GetId());
12388  json.EndString();
12389 
12390  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12391  }
12392  json.EndObject();
12393 
12394  json.EndObject();
12395 }
12396 
12397 #endif // #if VMA_STATS_STRING_ENABLED
12398 
12399 void VmaBlockVector::Defragment(
12400  class VmaBlockVectorDefragmentationContext* pCtx,
12401  VmaDefragmentationStats* pStats,
12402  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12403  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12404  VkCommandBuffer commandBuffer)
12405 {
12406  pCtx->res = VK_SUCCESS;
12407 
12408  const VkMemoryPropertyFlags memPropFlags =
12409  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12410  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12411  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12412 
12413  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12414  isHostVisible;
12415  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12416  !IsCorruptionDetectionEnabled();
12417 
12418  // There are options to defragment this memory type.
12419  if(canDefragmentOnCpu || canDefragmentOnGpu)
12420  {
12421  bool defragmentOnGpu;
12422  // There is only one option to defragment this memory type.
12423  if(canDefragmentOnGpu != canDefragmentOnCpu)
12424  {
12425  defragmentOnGpu = canDefragmentOnGpu;
12426  }
12427  // Both options are available: Heuristics to choose the best one.
12428  else
12429  {
12430  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12431  m_hAllocator->IsIntegratedGpu();
12432  }
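 // For example (hypothetical memory types): DEVICE_LOCAL | HOST_VISIBLE memory
 // takes the GPU path, while plain HOST_VISIBLE | HOST_COHERENT memory on a
 // discrete GPU falls back to the CPU memmove path.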
12433 
12434  bool overlappingMoveSupported = !defragmentOnGpu;
12435 
12436  if(m_hAllocator->m_UseMutex)
12437  {
12438  m_Mutex.LockWrite();
12439  pCtx->mutexLocked = true;
12440  }
12441 
12442  pCtx->Begin(overlappingMoveSupported);
12443 
12444  // Defragment.
12445 
12446  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12447  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12448  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12449  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12450  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12451 
12452  // Accumulate statistics.
12453  if(pStats != VMA_NULL)
12454  {
12455  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12456  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12457  pStats->bytesMoved += bytesMoved;
12458  pStats->allocationsMoved += allocationsMoved;
12459  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12460  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12461  if(defragmentOnGpu)
12462  {
12463  maxGpuBytesToMove -= bytesMoved;
12464  maxGpuAllocationsToMove -= allocationsMoved;
12465  }
12466  else
12467  {
12468  maxCpuBytesToMove -= bytesMoved;
12469  maxCpuAllocationsToMove -= allocationsMoved;
12470  }
12471  }
12472 
12473  if(pCtx->res >= VK_SUCCESS)
12474  {
12475  if(defragmentOnGpu)
12476  {
12477  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12478  }
12479  else
12480  {
12481  ApplyDefragmentationMovesCpu(pCtx, moves);
12482  }
12483  }
12484  }
12485 }
12486 
12487 void VmaBlockVector::DefragmentationEnd(
12488  class VmaBlockVectorDefragmentationContext* pCtx,
12489  VmaDefragmentationStats* pStats)
12490 {
12491  // Destroy buffers.
12492  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12493  {
12494  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12495  if(blockCtx.hBuffer)
12496  {
12497  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12498  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12499  }
12500  }
12501 
12502  if(pCtx->res >= VK_SUCCESS)
12503  {
12504  FreeEmptyBlocks(pStats);
12505  }
12506 
12507  if(pCtx->mutexLocked)
12508  {
12509  VMA_ASSERT(m_hAllocator->m_UseMutex);
12510  m_Mutex.UnlockWrite();
12511  }
12512 }
12513 
12514 size_t VmaBlockVector::CalcAllocationCount() const
12515 {
12516  size_t result = 0;
12517  for(size_t i = 0; i < m_Blocks.size(); ++i)
12518  {
12519  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12520  }
12521  return result;
12522 }
12523 
12524 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12525 {
12526  if(m_BufferImageGranularity == 1)
12527  {
12528  return false;
12529  }
12530  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12531  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12532  {
12533  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12534  VMA_ASSERT(m_Algorithm == 0);
12535  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12536  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12537  {
12538  return true;
12539  }
12540  }
12541  return false;
12542 }
12543 
12544 void VmaBlockVector::MakePoolAllocationsLost(
12545  uint32_t currentFrameIndex,
12546  size_t* pLostAllocationCount)
12547 {
12548  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12549  size_t lostAllocationCount = 0;
12550  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12551  {
12552  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12553  VMA_ASSERT(pBlock);
12554  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12555  }
12556  if(pLostAllocationCount != VMA_NULL)
12557  {
12558  *pLostAllocationCount = lostAllocationCount;
12559  }
12560 }
12561 
12562 VkResult VmaBlockVector::CheckCorruption()
12563 {
12564  if(!IsCorruptionDetectionEnabled())
12565  {
12566  return VK_ERROR_FEATURE_NOT_PRESENT;
12567  }
12568 
12569  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12570  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12571  {
12572  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12573  VMA_ASSERT(pBlock);
12574  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12575  if(res != VK_SUCCESS)
12576  {
12577  return res;
12578  }
12579  }
12580  return VK_SUCCESS;
12581 }
12582 
12583 void VmaBlockVector::AddStats(VmaStats* pStats)
12584 {
12585  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12586  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12587 
12588  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12589 
12590  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12591  {
12592  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12593  VMA_ASSERT(pBlock);
12594  VMA_HEAVY_ASSERT(pBlock->Validate());
12595  VmaStatInfo allocationStatInfo;
12596  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12597  VmaAddStatInfo(pStats->total, allocationStatInfo);
12598  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12599  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12600  }
12601 }
12602 
12603 ////////////////////////////////////////////////////////////////////////////////
12604 // VmaDefragmentationAlgorithm_Generic members definition
12605 
12606 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12607  VmaAllocator hAllocator,
12608  VmaBlockVector* pBlockVector,
12609  uint32_t currentFrameIndex,
12610  bool overlappingMoveSupported) :
12611  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12612  m_AllAllocations(false),
12613  m_AllocationCount(0),
12614  m_BytesMoved(0),
12615  m_AllocationsMoved(0),
12616  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12617 {
12618  // Create block info for each block.
12619  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12620  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12621  {
12622  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12623  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12624  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12625  m_Blocks.push_back(pBlockInfo);
12626  }
12627 
12628  // Sort them by m_pBlock pointer value.
12629  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12630 }
12631 
12632 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12633 {
12634  for(size_t i = m_Blocks.size(); i--; )
12635  {
12636  vma_delete(m_hAllocator, m_Blocks[i]);
12637  }
12638 }
12639 
12640 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12641 {
12642  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
12643  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12644  {
12645  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12646  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12647  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12648  {
12649  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12650  (*it)->m_Allocations.push_back(allocInfo);
12651  }
12652  else
12653  {
12654  VMA_ASSERT(0);
12655  }
12656 
12657  ++m_AllocationCount;
12658  }
12659 }
12660 
12661 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12662  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12663  VkDeviceSize maxBytesToMove,
12664  uint32_t maxAllocationsToMove)
12665 {
12666  if(m_Blocks.empty())
12667  {
12668  return VK_SUCCESS;
12669  }
12670 
12671  // This is a choice based on research.
12672  // Option 1:
12673  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12674  // Option 2:
12675  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12676  // Option 3:
12677  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12678 
12679  size_t srcBlockMinIndex = 0;
12680  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12681  /*
12682  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12683  {
12684  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12685  if(blocksWithNonMovableCount > 0)
12686  {
12687  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12688  }
12689  }
12690  */
12691 
12692  size_t srcBlockIndex = m_Blocks.size() - 1;
12693  size_t srcAllocIndex = SIZE_MAX;
12694  for(;;)
12695  {
12696  // 1. Find next allocation to move.
12697  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12698  // 1.2. Then start from last to first m_Allocations.
12699  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12700  {
12701  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12702  {
12703  // Finished: no more allocations to process.
12704  if(srcBlockIndex == srcBlockMinIndex)
12705  {
12706  return VK_SUCCESS;
12707  }
12708  else
12709  {
12710  --srcBlockIndex;
12711  srcAllocIndex = SIZE_MAX;
12712  }
12713  }
12714  else
12715  {
12716  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12717  }
12718  }
12719 
12720  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12721  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12722 
12723  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12724  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12725  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12726  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12727 
12728  // 2. Try to find new place for this allocation in preceding or current block.
12729  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12730  {
12731  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12732  VmaAllocationRequest dstAllocRequest;
12733  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12734  m_CurrentFrameIndex,
12735  m_pBlockVector->GetFrameInUseCount(),
12736  m_pBlockVector->GetBufferImageGranularity(),
12737  size,
12738  alignment,
12739  false, // upperAddress
12740  suballocType,
12741  false, // canMakeOtherLost
12742  strategy,
12743  &dstAllocRequest) &&
12744  MoveMakesSense(
12745  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12746  {
12747  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12748 
12749  // Reached limit on number of allocations or bytes to move.
12750  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12751  (m_BytesMoved + size > maxBytesToMove))
12752  {
12753  return VK_SUCCESS;
12754  }
12755 
12756  VmaDefragmentationMove move;
12757  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12758  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12759  move.srcOffset = srcOffset;
12760  move.dstOffset = dstAllocRequest.offset;
12761  move.size = size;
12762  moves.push_back(move);
12763 
12764  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12765  dstAllocRequest,
12766  suballocType,
12767  size,
12768  allocInfo.m_hAllocation);
12769  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12770 
12771  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12772 
12773  if(allocInfo.m_pChanged != VMA_NULL)
12774  {
12775  *allocInfo.m_pChanged = VK_TRUE;
12776  }
12777 
12778  ++m_AllocationsMoved;
12779  m_BytesMoved += size;
12780 
12781  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12782 
12783  break;
12784  }
12785  }
12786 
12787  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12788 
12789  if(srcAllocIndex > 0)
12790  {
12791  --srcAllocIndex;
12792  }
12793  else
12794  {
12795  if(srcBlockIndex > 0)
12796  {
12797  --srcBlockIndex;
12798  srcAllocIndex = SIZE_MAX;
12799  }
12800  else
12801  {
12802  return VK_SUCCESS;
12803  }
12804  }
12805  }
12806 }
12807 
12808 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12809 {
12810  size_t result = 0;
12811  for(size_t i = 0; i < m_Blocks.size(); ++i)
12812  {
12813  if(m_Blocks[i]->m_HasNonMovableAllocations)
12814  {
12815  ++result;
12816  }
12817  }
12818  return result;
12819 }
12820 
12821 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12822  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12823  VkDeviceSize maxBytesToMove,
12824  uint32_t maxAllocationsToMove)
12825 {
12826  if(!m_AllAllocations && m_AllocationCount == 0)
12827  {
12828  return VK_SUCCESS;
12829  }
12830 
12831  const size_t blockCount = m_Blocks.size();
12832  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12833  {
12834  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12835 
12836  if(m_AllAllocations)
12837  {
12838  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12839  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12840  it != pMetadata->m_Suballocations.end();
12841  ++it)
12842  {
12843  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12844  {
12845  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12846  pBlockInfo->m_Allocations.push_back(allocInfo);
12847  }
12848  }
12849  }
12850 
12851  pBlockInfo->CalcHasNonMovableAllocations();
12852 
12853  // This is a choice based on research.
12854  // Option 1:
12855  pBlockInfo->SortAllocationsByOffsetDescending();
12856  // Option 2:
12857  //pBlockInfo->SortAllocationsBySizeDescending();
12858  }
12859 
12860  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12861  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12862 
12863  // This is a choice based on research.
12864  const uint32_t roundCount = 2;
12865 
12866  // Execute defragmentation rounds (the main part).
12867  VkResult result = VK_SUCCESS;
12868  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12869  {
12870  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12871  }
12872 
12873  return result;
12874 }
12875 
12876 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12877  size_t dstBlockIndex, VkDeviceSize dstOffset,
12878  size_t srcBlockIndex, VkDeviceSize srcOffset)
12879 {
12880  if(dstBlockIndex < srcBlockIndex)
12881  {
12882  return true;
12883  }
12884  if(dstBlockIndex > srcBlockIndex)
12885  {
12886  return false;
12887  }
12888  if(dstOffset < srcOffset)
12889  {
12890  return true;
12891  }
12892  return false;
12893 }
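 // The comparison is lexicographic on (blockIndex, offset). For example, a move
 // from (block 2, offset 0) to (block 1, offset 4096) makes sense, as does a
 // move from offset 4096 down to offset 0 within one block, but a move to a
 // higher block index, or forward within the same block, never does.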
12894 
12895 ////////////////////////////////////////////////////////////////////////////////
12896 // VmaDefragmentationAlgorithm_Fast
12897 
12898 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12899  VmaAllocator hAllocator,
12900  VmaBlockVector* pBlockVector,
12901  uint32_t currentFrameIndex,
12902  bool overlappingMoveSupported) :
12903  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12904  m_OverlappingMoveSupported(overlappingMoveSupported),
12905  m_AllocationCount(0),
12906  m_AllAllocations(false),
12907  m_BytesMoved(0),
12908  m_AllocationsMoved(0),
12909  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12910 {
12911  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12912 
12913 }
12914 
12915 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12916 {
12917 }
12918 
12919 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12920  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12921  VkDeviceSize maxBytesToMove,
12922  uint32_t maxAllocationsToMove)
12923 {
12924  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12925 
12926  const size_t blockCount = m_pBlockVector->GetBlockCount();
12927  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12928  {
12929  return VK_SUCCESS;
12930  }
12931 
12932  PreprocessMetadata();
12933 
12934  // Sort blocks in order from most "destination" to most "source".
12935 
12936  m_BlockInfos.resize(blockCount);
12937  for(size_t i = 0; i < blockCount; ++i)
12938  {
12939  m_BlockInfos[i].origBlockIndex = i;
12940  }
12941 
12942  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12943  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12944  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12945  });
12946 
12947  // THE MAIN ALGORITHM
12948 
12949  FreeSpaceDatabase freeSpaceDb;
12950 
12951  size_t dstBlockInfoIndex = 0;
12952  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12953  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12954  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12955  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12956  VkDeviceSize dstOffset = 0;
12957 
12958  bool end = false;
12959  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12960  {
12961  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12962  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12963  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12964  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12965  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12966  {
12967  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12968  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12969  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12970  if(m_AllocationsMoved == maxAllocationsToMove ||
12971  m_BytesMoved + srcAllocSize > maxBytesToMove)
12972  {
12973  end = true;
12974  break;
12975  }
12976  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12977 
12978  // Try to place it in one of free spaces from the database.
12979  size_t freeSpaceInfoIndex;
12980  VkDeviceSize dstAllocOffset;
12981  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12982  freeSpaceInfoIndex, dstAllocOffset))
12983  {
12984  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12985  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12986  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12987 
12988  // Same block
12989  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12990  {
12991  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12992 
12993  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12994 
12995  VmaSuballocation suballoc = *srcSuballocIt;
12996  suballoc.offset = dstAllocOffset;
12997  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12998  m_BytesMoved += srcAllocSize;
12999  ++m_AllocationsMoved;
13000 
13001  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13002  ++nextSuballocIt;
13003  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13004  srcSuballocIt = nextSuballocIt;
13005 
13006  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13007 
13008  VmaDefragmentationMove move = {
13009  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13010  srcAllocOffset, dstAllocOffset,
13011  srcAllocSize };
13012  moves.push_back(move);
13013  }
13014  // Different block
13015  else
13016  {
13017  // MOVE OPTION 2: Move the allocation to a different block.
13018 
13019  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13020 
13021  VmaSuballocation suballoc = *srcSuballocIt;
13022  suballoc.offset = dstAllocOffset;
13023  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13024  m_BytesMoved += srcAllocSize;
13025  ++m_AllocationsMoved;
13026 
13027  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13028  ++nextSuballocIt;
13029  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13030  srcSuballocIt = nextSuballocIt;
13031 
13032  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13033 
13034  VmaDefragmentationMove move = {
13035  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13036  srcAllocOffset, dstAllocOffset,
13037  srcAllocSize };
13038  moves.push_back(move);
13039  }
13040  }
13041  else
13042  {
13043  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13044 
13045  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13046  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13047  dstAllocOffset + srcAllocSize > dstBlockSize)
13048  {
13049  // But before that, register remaining free space at the end of dst block.
13050  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13051 
13052  ++dstBlockInfoIndex;
13053  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13054  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13055  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13056  dstBlockSize = pDstMetadata->GetSize();
13057  dstOffset = 0;
13058  dstAllocOffset = 0;
13059  }
13060 
13061  // Same block
13062  if(dstBlockInfoIndex == srcBlockInfoIndex)
13063  {
13064  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13065 
13066  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13067 
13068  bool skipOver = overlap;
13069  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13070  {
13071  // If the destination and source places overlap, skip the move if it would
13072  // shift the allocation by less than 1/64 of its size.
13073  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13074  }
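 // For example, an allocation of size 6400 whose source offset lies only
 // 80 bytes past the destination offset is skipped: 80 * 64 = 5120 < 6400,
 // so the move would not be worth the copy.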
13075 
13076  if(skipOver)
13077  {
13078  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13079 
13080  dstOffset = srcAllocOffset + srcAllocSize;
13081  ++srcSuballocIt;
13082  }
13083  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13084  else
13085  {
13086  srcSuballocIt->offset = dstAllocOffset;
13087  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13088  dstOffset = dstAllocOffset + srcAllocSize;
13089  m_BytesMoved += srcAllocSize;
13090  ++m_AllocationsMoved;
13091  ++srcSuballocIt;
13092  VmaDefragmentationMove move = {
13093  srcOrigBlockIndex, dstOrigBlockIndex,
13094  srcAllocOffset, dstAllocOffset,
13095  srcAllocSize };
13096  moves.push_back(move);
13097  }
13098  }
13099  // Different block
13100  else
13101  {
13102  // MOVE OPTION 2: Move the allocation to a different block.
13103 
13104  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13105  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13106 
13107  VmaSuballocation suballoc = *srcSuballocIt;
13108  suballoc.offset = dstAllocOffset;
13109  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13110  dstOffset = dstAllocOffset + srcAllocSize;
13111  m_BytesMoved += srcAllocSize;
13112  ++m_AllocationsMoved;
13113 
13114  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13115  ++nextSuballocIt;
13116  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13117  srcSuballocIt = nextSuballocIt;
13118 
13119  pDstMetadata->m_Suballocations.push_back(suballoc);
13120 
13121  VmaDefragmentationMove move = {
13122  srcOrigBlockIndex, dstOrigBlockIndex,
13123  srcAllocOffset, dstAllocOffset,
13124  srcAllocSize };
13125  moves.push_back(move);
13126  }
13127  }
13128  }
13129  }
13130 
13131  m_BlockInfos.clear();
13132 
13133  PostprocessMetadata();
13134 
13135  return VK_SUCCESS;
13136 }
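/*
A sketch of the net effect on a hypothetical two-block vector (offsets
illustrative): before, block 0 holds [A: 0..100][free][B: 300..400] and
block 1 holds [C: 0..50]. The loop slides B down to offset 100 (MOVE OPTION 1)
and relocates C to block 0 at offset 200 (MOVE OPTION 2), leaving block 1
empty so that FreeEmptyBlocks() can release it later.
*/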
13137 
13138 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13139 {
13140  const size_t blockCount = m_pBlockVector->GetBlockCount();
13141  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13142  {
13143  VmaBlockMetadata_Generic* const pMetadata =
13144  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13145  pMetadata->m_FreeCount = 0;
13146  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13147  pMetadata->m_FreeSuballocationsBySize.clear();
13148  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13149  it != pMetadata->m_Suballocations.end(); )
13150  {
13151  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13152  {
13153  VmaSuballocationList::iterator nextIt = it;
13154  ++nextIt;
13155  pMetadata->m_Suballocations.erase(it);
13156  it = nextIt;
13157  }
13158  else
13159  {
13160  ++it;
13161  }
13162  }
13163  }
13164 }
13165 
13166 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13167 {
13168  const size_t blockCount = m_pBlockVector->GetBlockCount();
13169  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13170  {
13171  VmaBlockMetadata_Generic* const pMetadata =
13172  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13173  const VkDeviceSize blockSize = pMetadata->GetSize();
13174 
13175  // No allocations in this block - entire area is free.
13176  if(pMetadata->m_Suballocations.empty())
13177  {
13178  pMetadata->m_FreeCount = 1;
13179  //pMetadata->m_SumFreeSize is already set to blockSize.
13180  VmaSuballocation suballoc = {
13181  0, // offset
13182  blockSize, // size
13183  VMA_NULL, // hAllocation
13184  VMA_SUBALLOCATION_TYPE_FREE };
13185  pMetadata->m_Suballocations.push_back(suballoc);
13186  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13187  }
13188  // There are some allocations in this block.
13189  else
13190  {
13191  VkDeviceSize offset = 0;
13192  VmaSuballocationList::iterator it;
13193  for(it = pMetadata->m_Suballocations.begin();
13194  it != pMetadata->m_Suballocations.end();
13195  ++it)
13196  {
13197  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13198  VMA_ASSERT(it->offset >= offset);
13199 
13200  // Need to insert preceding free space.
13201  if(it->offset > offset)
13202  {
13203  ++pMetadata->m_FreeCount;
13204  const VkDeviceSize freeSize = it->offset - offset;
13205  VmaSuballocation suballoc = {
13206  offset, // offset
13207  freeSize, // size
13208  VMA_NULL, // hAllocation
13209  VMA_SUBALLOCATION_TYPE_FREE };
13210  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13211  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13212  {
13213  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13214  }
13215  }
13216 
13217  pMetadata->m_SumFreeSize -= it->size;
13218  offset = it->offset + it->size;
13219  }
13220 
13221  // Need to insert trailing free space.
13222  if(offset < blockSize)
13223  {
13224  ++pMetadata->m_FreeCount;
13225  const VkDeviceSize freeSize = blockSize - offset;
13226  VmaSuballocation suballoc = {
13227  offset, // offset
13228  freeSize, // size
13229  VMA_NULL, // hAllocation
13230  VMA_SUBALLOCATION_TYPE_FREE };
13231  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13232  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13233  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13234  {
13235  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13236  }
13237  }
13238 
13239  VMA_SORT(
13240  pMetadata->m_FreeSuballocationsBySize.begin(),
13241  pMetadata->m_FreeSuballocationsBySize.end(),
13242  VmaSuballocationItemSizeLess());
13243  }
13244 
13245  VMA_HEAVY_ASSERT(pMetadata->Validate());
13246  }
13247 }
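 // A worked example of the reconstruction above, for a hypothetical 1024-byte
 // block holding allocations at [100, 200) and [512, 640): three FREE
 // suballocations are inserted - [0, 100), [200, 512) and [640, 1024) - giving
 // m_FreeCount == 3 and m_SumFreeSize == 100 + 312 + 384 == 796.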
13248 
13249 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13250 {
13251  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13252  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13253  while(it != pMetadata->m_Suballocations.end() &&
13254  it->offset < suballoc.offset)
13255  {
13256  // Keep scanning past suballocations that start before the new one.
13257  ++it;
13258  }
13259  // it now points to the first element at or past suballoc.offset (or to end).
13260  pMetadata->m_Suballocations.insert(it, suballoc);
13261 }
13262 
13263 ////////////////////////////////////////////////////////////////////////////////
13264 // VmaBlockVectorDefragmentationContext
13265 
13266 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13267  VmaAllocator hAllocator,
13268  VmaPool hCustomPool,
13269  VmaBlockVector* pBlockVector,
13270  uint32_t currFrameIndex,
13271  uint32_t algorithmFlags) :
13272  res(VK_SUCCESS),
13273  mutexLocked(false),
13274  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13275  m_hAllocator(hAllocator),
13276  m_hCustomPool(hCustomPool),
13277  m_pBlockVector(pBlockVector),
13278  m_CurrFrameIndex(currFrameIndex),
13279  m_AlgorithmFlags(algorithmFlags),
13280  m_pAlgorithm(VMA_NULL),
13281  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13282  m_AllAllocations(false)
13283 {
13284 }
13285 
13286 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13287 {
13288  vma_delete(m_hAllocator, m_pAlgorithm);
13289 }
13290 
13291 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13292 {
13293  AllocInfo info = { hAlloc, pChanged };
13294  m_Allocations.push_back(info);
13295 }
13296 
13297 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13298 {
13299  const bool allAllocations = m_AllAllocations ||
13300  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13301 
13302  /********************************
13303  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13304  ********************************/
13305 
13306  /*
13307  Fast algorithm is supported only when certain criteria are met:
13308  - VMA_DEBUG_MARGIN is 0.
13309  - All allocations in this block vector are moveable.
13310  - There is no possibility of image/buffer granularity conflict.
13311  */
13312  if(VMA_DEBUG_MARGIN == 0 &&
13313  allAllocations &&
13314  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13315  {
13316  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13317  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13318  }
13319  else
13320  {
13321  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13322  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13323  }
13324 
13325  if(allAllocations)
13326  {
13327  m_pAlgorithm->AddAll();
13328  }
13329  else
13330  {
13331  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13332  {
13333  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13334  }
13335  }
13336 }
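/*
The branch above reduces to a single predicate. A hedged restatement using the
same members (illustrative helper, not present in the library):

    static bool CanUseFastAlgorithm(VmaBlockVector* pBlockVector, bool allAllocations)
    {
        return VMA_DEBUG_MARGIN == 0 &&
            allAllocations &&
            !pBlockVector->IsBufferImageGranularityConflictPossible();
    }
*/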
13337 
13338 ////////////////////////////////////////////////////////////////////////////////
13339 // VmaDefragmentationContext
13340 
13341 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13342  VmaAllocator hAllocator,
13343  uint32_t currFrameIndex,
13344  uint32_t flags,
13345  VmaDefragmentationStats* pStats) :
13346  m_hAllocator(hAllocator),
13347  m_CurrFrameIndex(currFrameIndex),
13348  m_Flags(flags),
13349  m_pStats(pStats),
13350  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13351 {
13352  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13353 }
13354 
13355 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13356 {
13357  for(size_t i = m_CustomPoolContexts.size(); i--; )
13358  {
13359  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13360  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13361  vma_delete(m_hAllocator, pBlockVectorCtx);
13362  }
13363  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13364  {
13365  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13366  if(pBlockVectorCtx)
13367  {
13368  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13369  vma_delete(m_hAllocator, pBlockVectorCtx);
13370  }
13371  }
13372 }
13373 
13374 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13375 {
13376  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13377  {
13378  VmaPool pool = pPools[poolIndex];
13379  VMA_ASSERT(pool);
13380  // Pools with an algorithm other than the default are not defragmented.
13381  if(pool->m_BlockVector.GetAlgorithm() == 0)
13382  {
13383  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13384 
13385  for(size_t i = m_CustomPoolContexts.size(); i--; )
13386  {
13387  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13388  {
13389  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13390  break;
13391  }
13392  }
13393 
13394  if(!pBlockVectorDefragCtx)
13395  {
13396  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13397  m_hAllocator,
13398  pool,
13399  &pool->m_BlockVector,
13400  m_CurrFrameIndex,
13401  m_Flags);
13402  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13403  }
13404 
13405  pBlockVectorDefragCtx->AddAll();
13406  }
13407  }
13408 }
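/*
AddPools() is reached from the public API through vmaDefragmentationBegin().
A hedged usage sketch (myPool is an assumed, previously created VmaPool):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.poolCount = 1;
    defragInfo.pPools = &myPool;
    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // ... submit/wait if a command buffer was used ...
    vmaDefragmentationEnd(allocator, defragCtx);
*/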
13409 
13410 void VmaDefragmentationContext_T::AddAllocations(
13411  uint32_t allocationCount,
13412  VmaAllocation* pAllocations,
13413  VkBool32* pAllocationsChanged)
13414 {
13415  // Dispatch pAllocations among defragmentators. Create them when necessary.
13416  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13417  {
13418  const VmaAllocation hAlloc = pAllocations[allocIndex];
13419  VMA_ASSERT(hAlloc);
13420  // DedicatedAlloc cannot be defragmented.
13421  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13422  // Lost allocation cannot be defragmented.
13423  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13424  {
13425  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13426 
13427  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13428  // This allocation belongs to custom pool.
13429  if(hAllocPool != VK_NULL_HANDLE)
13430  {
13431  // Pools with an algorithm other than the default are not defragmented.
13432  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13433  {
13434  for(size_t i = m_CustomPoolContexts.size(); i--; )
13435  {
13436  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13437  {
13438  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13439  break;
13440  }
13441  }
13442  if(!pBlockVectorDefragCtx)
13443  {
13444  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13445  m_hAllocator,
13446  hAllocPool,
13447  &hAllocPool->m_BlockVector,
13448  m_CurrFrameIndex,
13449  m_Flags);
13450  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13451  }
13452  }
13453  }
13454  // This allocation belongs to default pool.
13455  else
13456  {
13457  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13458  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13459  if(!pBlockVectorDefragCtx)
13460  {
13461  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13462  m_hAllocator,
13463  VMA_NULL, // hCustomPool
13464  m_hAllocator->m_pBlockVectors[memTypeIndex],
13465  m_CurrFrameIndex,
13466  m_Flags);
13467  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13468  }
13469  }
13470 
13471  if(pBlockVectorDefragCtx)
13472  {
13473  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13474  &pAllocationsChanged[allocIndex] : VMA_NULL;
13475  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13476  }
13477  }
13478  }
13479 }
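/*
The allocation-based path corresponds to filling VmaDefragmentationInfo2 with
explicit allocations instead of pools. Hedged sketch (allocs and allocCount
are assumed to exist):

    std::vector<VkBool32> changed(allocCount, VK_FALSE);
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = (uint32_t)allocCount;
    info.pAllocations = allocs;
    info.pAllocationsChanged = changed.data(); // set to VK_TRUE where an allocation moved
*/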
13480 
13481 VkResult VmaDefragmentationContext_T::Defragment(
13482  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13483  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13484  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13485 {
13486  if(pStats)
13487  {
13488  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13489  }
13490 
13491  if(commandBuffer == VK_NULL_HANDLE)
13492  {
13493  maxGpuBytesToMove = 0;
13494  maxGpuAllocationsToMove = 0;
13495  }
13496 
13497  VkResult res = VK_SUCCESS;
13498 
13499  // Process default pools.
13500  for(uint32_t memTypeIndex = 0;
13501  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13502  ++memTypeIndex)
13503  {
13504  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13505  if(pBlockVectorCtx)
13506  {
13507  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13508  pBlockVectorCtx->GetBlockVector()->Defragment(
13509  pBlockVectorCtx,
13510  pStats,
13511  maxCpuBytesToMove, maxCpuAllocationsToMove,
13512  maxGpuBytesToMove, maxGpuAllocationsToMove,
13513  commandBuffer);
13514  if(pBlockVectorCtx->res != VK_SUCCESS)
13515  {
13516  res = pBlockVectorCtx->res;
13517  }
13518  }
13519  }
13520 
13521  // Process custom pools.
13522  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13523  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13524  ++customCtxIndex)
13525  {
13526  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13527  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13528  pBlockVectorCtx->GetBlockVector()->Defragment(
13529  pBlockVectorCtx,
13530  pStats,
13531  maxCpuBytesToMove, maxCpuAllocationsToMove,
13532  maxGpuBytesToMove, maxGpuAllocationsToMove,
13533  commandBuffer);
13534  if(pBlockVectorCtx->res != VK_SUCCESS)
13535  {
13536  res = pBlockVectorCtx->res;
13537  }
13538  }
13539 
13540  return res;
13541 }
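/*
Note on the limits: the maxGpu* budget only applies when a valid command
buffer is passed; with commandBuffer == VK_NULL_HANDLE the code above clamps
it to zero, so only host-side (CPU memcpy) moves are performed. Hedged sketch
of the two modes:

    // CPU-only defragmentation:
    info.commandBuffer = VK_NULL_HANDLE;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    // Additionally allow GPU-side copies (cmdBuf is an assumed command
    // buffer in the recording state):
    info.commandBuffer = cmdBuf;
    info.maxGpuBytesToMove = VK_WHOLE_SIZE;
    info.maxGpuAllocationsToMove = UINT32_MAX;
*/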
13542 
13543 ////////////////////////////////////////////////////////////////////////////////
13544 // VmaRecorder
13545 
13546 #if VMA_RECORDING_ENABLED
13547 
13548 VmaRecorder::VmaRecorder() :
13549  m_UseMutex(true),
13550  m_Flags(0),
13551  m_File(VMA_NULL),
13552  m_Freq(INT64_MAX),
13553  m_StartCounter(INT64_MAX)
13554 {
13555 }
13556 
13557 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13558 {
13559  m_UseMutex = useMutex;
13560  m_Flags = settings.flags;
13561 
13562  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13563  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13564 
13565  // Open file for writing.
13566  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13567  if(err != 0)
13568  {
13569  return VK_ERROR_INITIALIZATION_FAILED;
13570  }
13571 
13572  // Write header.
13573  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13574  fprintf(m_File, "%s\n", "1,5");
13575 
13576  return VK_SUCCESS;
13577 }
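/*
The recording is a plain CSV file. After Init() and WriteConfiguration() it
begins roughly like this (illustrative content; one row per recorded call):

    Vulkan Memory Allocator,Calls recording
    1,5
    Config,Begin
    ...
    Config,End
    <threadId>,<seconds since Init>,<frameIndex>,vmaCreateAllocator
*/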
13578 
13579 VmaRecorder::~VmaRecorder()
13580 {
13581  if(m_File != VMA_NULL)
13582  {
13583  fclose(m_File);
13584  }
13585 }
13586 
13587 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13588 {
13589  CallParams callParams;
13590  GetBasicParams(callParams);
13591 
13592  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13593  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13594  Flush();
13595 }
13596 
13597 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13598 {
13599  CallParams callParams;
13600  GetBasicParams(callParams);
13601 
13602  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13603  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13604  Flush();
13605 }
13606 
13607 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13608 {
13609  CallParams callParams;
13610  GetBasicParams(callParams);
13611 
13612  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13613  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13614  createInfo.memoryTypeIndex,
13615  createInfo.flags,
13616  createInfo.blockSize,
13617  (uint64_t)createInfo.minBlockCount,
13618  (uint64_t)createInfo.maxBlockCount,
13619  createInfo.frameInUseCount,
13620  pool);
13621  Flush();
13622 }
13623 
13624 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13625 {
13626  CallParams callParams;
13627  GetBasicParams(callParams);
13628 
13629  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13630  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13631  pool);
13632  Flush();
13633 }
13634 
13635 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13636  const VkMemoryRequirements& vkMemReq,
13637  const VmaAllocationCreateInfo& createInfo,
13638  VmaAllocation allocation)
13639 {
13640  CallParams callParams;
13641  GetBasicParams(callParams);
13642 
13643  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13644  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13645  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13646  vkMemReq.size,
13647  vkMemReq.alignment,
13648  vkMemReq.memoryTypeBits,
13649  createInfo.flags,
13650  createInfo.usage,
13651  createInfo.requiredFlags,
13652  createInfo.preferredFlags,
13653  createInfo.memoryTypeBits,
13654  createInfo.pool,
13655  allocation,
13656  userDataStr.GetString());
13657  Flush();
13658 }
13659 
13660 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13661  const VkMemoryRequirements& vkMemReq,
13662  const VmaAllocationCreateInfo& createInfo,
13663  uint64_t allocationCount,
13664  const VmaAllocation* pAllocations)
13665 {
13666  CallParams callParams;
13667  GetBasicParams(callParams);
13668 
13669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13670  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13671  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13672  vkMemReq.size,
13673  vkMemReq.alignment,
13674  vkMemReq.memoryTypeBits,
13675  createInfo.flags,
13676  createInfo.usage,
13677  createInfo.requiredFlags,
13678  createInfo.preferredFlags,
13679  createInfo.memoryTypeBits,
13680  createInfo.pool);
13681  PrintPointerList(allocationCount, pAllocations);
13682  fprintf(m_File, ",%s\n", userDataStr.GetString());
13683  Flush();
13684 }
13685 
13686 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13687  const VkMemoryRequirements& vkMemReq,
13688  bool requiresDedicatedAllocation,
13689  bool prefersDedicatedAllocation,
13690  const VmaAllocationCreateInfo& createInfo,
13691  VmaAllocation allocation)
13692 {
13693  CallParams callParams;
13694  GetBasicParams(callParams);
13695 
13696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13697  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13698  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13699  vkMemReq.size,
13700  vkMemReq.alignment,
13701  vkMemReq.memoryTypeBits,
13702  requiresDedicatedAllocation ? 1 : 0,
13703  prefersDedicatedAllocation ? 1 : 0,
13704  createInfo.flags,
13705  createInfo.usage,
13706  createInfo.requiredFlags,
13707  createInfo.preferredFlags,
13708  createInfo.memoryTypeBits,
13709  createInfo.pool,
13710  allocation,
13711  userDataStr.GetString());
13712  Flush();
13713 }
13714 
13715 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13716  const VkMemoryRequirements& vkMemReq,
13717  bool requiresDedicatedAllocation,
13718  bool prefersDedicatedAllocation,
13719  const VmaAllocationCreateInfo& createInfo,
13720  VmaAllocation allocation)
13721 {
13722  CallParams callParams;
13723  GetBasicParams(callParams);
13724 
13725  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13726  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13727  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13728  vkMemReq.size,
13729  vkMemReq.alignment,
13730  vkMemReq.memoryTypeBits,
13731  requiresDedicatedAllocation ? 1 : 0,
13732  prefersDedicatedAllocation ? 1 : 0,
13733  createInfo.flags,
13734  createInfo.usage,
13735  createInfo.requiredFlags,
13736  createInfo.preferredFlags,
13737  createInfo.memoryTypeBits,
13738  createInfo.pool,
13739  allocation,
13740  userDataStr.GetString());
13741  Flush();
13742 }
13743 
13744 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13745  VmaAllocation allocation)
13746 {
13747  CallParams callParams;
13748  GetBasicParams(callParams);
13749 
13750  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13751  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13752  allocation);
13753  Flush();
13754 }
13755 
13756 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13757  uint64_t allocationCount,
13758  const VmaAllocation* pAllocations)
13759 {
13760  CallParams callParams;
13761  GetBasicParams(callParams);
13762 
13763  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13764  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13765  PrintPointerList(allocationCount, pAllocations);
13766  fprintf(m_File, "\n");
13767  Flush();
13768 }
13769 
13770 void VmaRecorder::RecordResizeAllocation(
13771  uint32_t frameIndex,
13772  VmaAllocation allocation,
13773  VkDeviceSize newSize)
13774 {
13775  CallParams callParams;
13776  GetBasicParams(callParams);
13777 
13778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13779  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13780  allocation, newSize);
13781  Flush();
13782 }
13783 
13784 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13785  VmaAllocation allocation,
13786  const void* pUserData)
13787 {
13788  CallParams callParams;
13789  GetBasicParams(callParams);
13790 
13791  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13792  UserDataString userDataStr(
13793  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13794  pUserData);
13795  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13796  allocation,
13797  userDataStr.GetString());
13798  Flush();
13799 }
13800 
13801 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13802  VmaAllocation allocation)
13803 {
13804  CallParams callParams;
13805  GetBasicParams(callParams);
13806 
13807  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13808  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13809  allocation);
13810  Flush();
13811 }
13812 
13813 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13814  VmaAllocation allocation)
13815 {
13816  CallParams callParams;
13817  GetBasicParams(callParams);
13818 
13819  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13820  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13821  allocation);
13822  Flush();
13823 }
13824 
13825 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13826  VmaAllocation allocation)
13827 {
13828  CallParams callParams;
13829  GetBasicParams(callParams);
13830 
13831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13832  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13833  allocation);
13834  Flush();
13835 }
13836 
13837 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13838  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13839 {
13840  CallParams callParams;
13841  GetBasicParams(callParams);
13842 
13843  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13844  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13845  allocation,
13846  offset,
13847  size);
13848  Flush();
13849 }
13850 
13851 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13852  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13853 {
13854  CallParams callParams;
13855  GetBasicParams(callParams);
13856 
13857  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13858  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13859  allocation,
13860  offset,
13861  size);
13862  Flush();
13863 }
13864 
13865 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13866  const VkBufferCreateInfo& bufCreateInfo,
13867  const VmaAllocationCreateInfo& allocCreateInfo,
13868  VmaAllocation allocation)
13869 {
13870  CallParams callParams;
13871  GetBasicParams(callParams);
13872 
13873  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13874  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13875  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13876  bufCreateInfo.flags,
13877  bufCreateInfo.size,
13878  bufCreateInfo.usage,
13879  bufCreateInfo.sharingMode,
13880  allocCreateInfo.flags,
13881  allocCreateInfo.usage,
13882  allocCreateInfo.requiredFlags,
13883  allocCreateInfo.preferredFlags,
13884  allocCreateInfo.memoryTypeBits,
13885  allocCreateInfo.pool,
13886  allocation,
13887  userDataStr.GetString());
13888  Flush();
13889 }
13890 
13891 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13892  const VkImageCreateInfo& imageCreateInfo,
13893  const VmaAllocationCreateInfo& allocCreateInfo,
13894  VmaAllocation allocation)
13895 {
13896  CallParams callParams;
13897  GetBasicParams(callParams);
13898 
13899  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13900  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13901  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13902  imageCreateInfo.flags,
13903  imageCreateInfo.imageType,
13904  imageCreateInfo.format,
13905  imageCreateInfo.extent.width,
13906  imageCreateInfo.extent.height,
13907  imageCreateInfo.extent.depth,
13908  imageCreateInfo.mipLevels,
13909  imageCreateInfo.arrayLayers,
13910  imageCreateInfo.samples,
13911  imageCreateInfo.tiling,
13912  imageCreateInfo.usage,
13913  imageCreateInfo.sharingMode,
13914  imageCreateInfo.initialLayout,
13915  allocCreateInfo.flags,
13916  allocCreateInfo.usage,
13917  allocCreateInfo.requiredFlags,
13918  allocCreateInfo.preferredFlags,
13919  allocCreateInfo.memoryTypeBits,
13920  allocCreateInfo.pool,
13921  allocation,
13922  userDataStr.GetString());
13923  Flush();
13924 }
13925 
13926 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13927  VmaAllocation allocation)
13928 {
13929  CallParams callParams;
13930  GetBasicParams(callParams);
13931 
13932  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13933  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13934  allocation);
13935  Flush();
13936 }
13937 
13938 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13939  VmaAllocation allocation)
13940 {
13941  CallParams callParams;
13942  GetBasicParams(callParams);
13943 
13944  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13945  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13946  allocation);
13947  Flush();
13948 }
13949 
13950 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13951  VmaAllocation allocation)
13952 {
13953  CallParams callParams;
13954  GetBasicParams(callParams);
13955 
13956  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13957  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13958  allocation);
13959  Flush();
13960 }
13961 
13962 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13963  VmaAllocation allocation)
13964 {
13965  CallParams callParams;
13966  GetBasicParams(callParams);
13967 
13968  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13969  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13970  allocation);
13971  Flush();
13972 }
13973 
13974 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13975  VmaPool pool)
13976 {
13977  CallParams callParams;
13978  GetBasicParams(callParams);
13979 
13980  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13981  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13982  pool);
13983  Flush();
13984 }
13985 
13986 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13987  const VmaDefragmentationInfo2& info,
13988  VmaDefragmentationContext ctx)
13989 {
13990  CallParams callParams;
13991  GetBasicParams(callParams);
13992 
13993  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13994  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13995  info.flags);
13996  PrintPointerList(info.allocationCount, info.pAllocations);
13997  fprintf(m_File, ",");
13998  PrintPointerList(info.poolCount, info.pPools);
13999  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14000  info.maxCpuBytesToMove,
14001  info.maxCpuAllocationsToMove,
14002  info.maxGpuBytesToMove,
14003  info.maxGpuAllocationsToMove,
14004  info.commandBuffer,
14005  ctx);
14006  Flush();
14007 }
14008 
14009 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14010  VmaDefragmentationContext ctx)
14011 {
14012  CallParams callParams;
14013  GetBasicParams(callParams);
14014 
14015  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14016  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14017  ctx);
14018  Flush();
14019 }
14020 
14021 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14022 {
14023  if(pUserData != VMA_NULL)
14024  {
14025  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14026  {
14027  m_Str = (const char*)pUserData;
14028  }
14029  else
14030  {
14031  sprintf_s(m_PtrStr, "%p", pUserData);
14032  m_Str = m_PtrStr;
14033  }
14034  }
14035  else
14036  {
14037  m_Str = "";
14038  }
14039 }
14040 
14041 void VmaRecorder::WriteConfiguration(
14042  const VkPhysicalDeviceProperties& devProps,
14043  const VkPhysicalDeviceMemoryProperties& memProps,
14044  bool dedicatedAllocationExtensionEnabled)
14045 {
14046  fprintf(m_File, "Config,Begin\n");
14047 
14048  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14049  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14050  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14051  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14052  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14053  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14054 
14055  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14056  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14057  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14058 
14059  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14060  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14061  {
14062  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14063  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14064  }
14065  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14066  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14067  {
14068  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14069  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14070  }
14071 
14072  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14073 
14074  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14075  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14076  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14077  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14078  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14079  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14080  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14081  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14082  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14083 
14084  fprintf(m_File, "Config,End\n");
14085 }
14086 
14087 void VmaRecorder::GetBasicParams(CallParams& outParams)
14088 {
14089  outParams.threadId = GetCurrentThreadId();
14090 
14091  LARGE_INTEGER counter;
14092  QueryPerformanceCounter(&counter);
14093  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14094 }
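/*
Worked example of the timestamp math above: QueryPerformanceCounter() returns
ticks and m_Freq is ticks per second, so with m_Freq = 10,000,000 (100 ns
ticks) a delta of 25,000,000 ticks is recorded as
25,000,000 / 10,000,000 = 2.500 seconds.
*/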
14095 
14096 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14097 {
14098  if(count)
14099  {
14100  fprintf(m_File, "%p", pItems[0]);
14101  for(uint64_t i = 1; i < count; ++i)
14102  {
14103  fprintf(m_File, " %p", pItems[i]);
14104  }
14105  }
14106 }
14107 
14108 void VmaRecorder::Flush()
14109 {
14110  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14111  {
14112  fflush(m_File);
14113  }
14114 }
14115 
14116 #endif // #if VMA_RECORDING_ENABLED
14117 
14118 ////////////////////////////////////////////////////////////////////////////////
14119 // VmaAllocationObjectAllocator
14120 
14121 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14122  m_Allocator(pAllocationCallbacks, 1024)
14123 {
14124 }
14125 
14126 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14127 {
14128  VmaMutexLock mutexLock(m_Mutex);
14129  return m_Allocator.Alloc();
14130 }
14131 
14132 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14133 {
14134  VmaMutexLock mutexLock(m_Mutex);
14135  m_Allocator.Free(hAlloc);
14136 }
14137 
14138 ////////////////////////////////////////////////////////////////////////////////
14139 // VmaAllocator_T
14140 
14141 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14142  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14143  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14144  m_hDevice(pCreateInfo->device),
14145  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14146  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14147  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14148  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14149  m_PreferredLargeHeapBlockSize(0),
14150  m_PhysicalDevice(pCreateInfo->physicalDevice),
14151  m_CurrentFrameIndex(0),
14152  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14153  m_NextPoolId(0)
14154 #if VMA_RECORDING_ENABLED
14155  ,m_pRecorder(VMA_NULL)
14156 #endif
14157 {
14158  if(VMA_DEBUG_DETECT_CORRUPTION)
14159  {
14160  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14161  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14162  }
14163 
14164  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14165 
14166 #if !(VMA_DEDICATED_ALLOCATION)
14167  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14168  {
14169  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14170  }
14171 #endif
14172 
14173  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14174  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14175  memset(&m_MemProps, 0, sizeof(m_MemProps));
14176 
14177  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14178  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14179 
14180  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14181  {
14182  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14183  }
14184 
14185  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14186  {
14187  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14188  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14189  }
14190 
14191  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14192 
14193  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14194  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14195 
14196  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14197  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14198  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14199  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14200 
14201  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14202  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14203 
14204  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14205  {
14206  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14207  {
14208  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14209  if(limit != VK_WHOLE_SIZE)
14210  {
14211  m_HeapSizeLimit[heapIndex] = limit;
14212  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14213  {
14214  m_MemProps.memoryHeaps[heapIndex].size = limit;
14215  }
14216  }
14217  }
14218  }
14219 
14220  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14221  {
14222  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14223 
14224  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14225  this,
14226  VK_NULL_HANDLE, // hParentPool
14227  memTypeIndex,
14228  preferredBlockSize,
14229  0,
14230  SIZE_MAX,
14231  GetBufferImageGranularity(),
14232  pCreateInfo->frameInUseCount,
14233  false, // isCustomPool
14234  false, // explicitBlockSize
14235  false); // linearAlgorithm
14236  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14237  // because minBlockCount is 0.
14238  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14239 
14240  }
14241 }
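/*
pHeapSizeLimit handling above caps both the allocator's budget and the
reported heap size. Hedged usage sketch (deviceLocalHeapIndex is an assumed
heap index):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;                        // VK_WHOLE_SIZE = no limit
    heapLimits[deviceLocalHeapIndex] = 256ull * 1024 * 1024;  // cap one heap at 256 MiB
    allocatorCreateInfo.pHeapSizeLimit = heapLimits;
*/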
14242 
14243 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14244 {
14245  VkResult res = VK_SUCCESS;
14246 
14247  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14248  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14249  {
14250 #if VMA_RECORDING_ENABLED
14251  m_pRecorder = vma_new(this, VmaRecorder)();
14252  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14253  if(res != VK_SUCCESS)
14254  {
14255  return res;
14256  }
14257  m_pRecorder->WriteConfiguration(
14258  m_PhysicalDeviceProperties,
14259  m_MemProps,
14260  m_UseKhrDedicatedAllocation);
14261  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14262 #else
14263  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14264  return VK_ERROR_FEATURE_NOT_PRESENT;
14265 #endif
14266  }
14267 
14268  return res;
14269 }
14270 
14271 VmaAllocator_T::~VmaAllocator_T()
14272 {
14273 #if VMA_RECORDING_ENABLED
14274  if(m_pRecorder != VMA_NULL)
14275  {
14276  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14277  vma_delete(this, m_pRecorder);
14278  }
14279 #endif
14280 
14281  VMA_ASSERT(m_Pools.empty());
14282 
14283  for(size_t i = GetMemoryTypeCount(); i--; )
14284  {
14285  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14286  {
14287  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14288  }
14289 
14290  vma_delete(this, m_pDedicatedAllocations[i]);
14291  vma_delete(this, m_pBlockVectors[i]);
14292  }
14293 }
14294 
14295 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14296 {
14297 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14298  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14299  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14300  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14301  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14302  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14303  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14304  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14305  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14306  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14307  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14308  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14309  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14310  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14311  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14312  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14313  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14314  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14315 #if VMA_DEDICATED_ALLOCATION
14316  if(m_UseKhrDedicatedAllocation)
14317  {
14318  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14319  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14320  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14321  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14322  }
14323 #endif // #if VMA_DEDICATED_ALLOCATION
14324 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14325 
14326 #define VMA_COPY_IF_NOT_NULL(funcName) \
14327  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14328 
14329  if(pVulkanFunctions != VMA_NULL)
14330  {
14331  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14332  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14333  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14334  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14335  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14336  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14337  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14338  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14339  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14340  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14341  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14342  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14343  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14344  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14345  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14346  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14347  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14348 #if VMA_DEDICATED_ALLOCATION
14349  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14350  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14351 #endif
14352  }
14353 
14354 #undef VMA_COPY_IF_NOT_NULL
14355 
14356  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14357  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14358  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14359  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14360  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14361  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14362  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14363  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14364  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14365  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14366  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14367  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14368  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14369  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14370  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14371  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14372  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14373  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14374  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14375 #if VMA_DEDICATED_ALLOCATION
14376  if(m_UseKhrDedicatedAllocation)
14377  {
14378  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14379  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14380  }
14381 #endif
14382 }
14383 
14384 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14385 {
14386  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14387  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14388  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14389  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14390 }
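/*
Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE (1 GiB) and
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB):
- 512 MiB heap: "small", so preferred block size = 512 MiB / 8 = 64 MiB.
- 8 GiB heap:   "large", so preferred block size = 256 MiB.
*/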
14391 
14392 VkResult VmaAllocator_T::AllocateMemoryOfType(
14393  VkDeviceSize size,
14394  VkDeviceSize alignment,
14395  bool dedicatedAllocation,
14396  VkBuffer dedicatedBuffer,
14397  VkImage dedicatedImage,
14398  const VmaAllocationCreateInfo& createInfo,
14399  uint32_t memTypeIndex,
14400  VmaSuballocationType suballocType,
14401  size_t allocationCount,
14402  VmaAllocation* pAllocations)
14403 {
14404  VMA_ASSERT(pAllocations != VMA_NULL);
14405  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14406 
14407  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14408 
14409  // If memory type is not HOST_VISIBLE, disable MAPPED.
14410  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14411  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14412  {
14413  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14414  }
14415 
14416  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14417  VMA_ASSERT(blockVector);
14418 
14419  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14420  bool preferDedicatedMemory =
14421  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14422  dedicatedAllocation ||
14423  // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
14424  size > preferredBlockSize / 2;
14425 
14426  if(preferDedicatedMemory &&
14427  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14428  finalCreateInfo.pool == VK_NULL_HANDLE)
14429  {
14430  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14431  }
14432 
14433  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14434  {
14435  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14436  {
14437  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14438  }
14439  else
14440  {
14441  return AllocateDedicatedMemory(
14442  size,
14443  suballocType,
14444  memTypeIndex,
14445  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14446  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14447  finalCreateInfo.pUserData,
14448  dedicatedBuffer,
14449  dedicatedImage,
14450  allocationCount,
14451  pAllocations);
14452  }
14453  }
14454  else
14455  {
14456  VkResult res = blockVector->Allocate(
14457  m_CurrentFrameIndex.load(),
14458  size,
14459  alignment,
14460  finalCreateInfo,
14461  suballocType,
14462  allocationCount,
14463  pAllocations);
14464  if(res == VK_SUCCESS)
14465  {
14466  return res;
14467  }
14468 
14469  // Block allocation failed: Try dedicated memory.
14470  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14471  {
14472  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14473  }
14474  else
14475  {
14476  res = AllocateDedicatedMemory(
14477  size,
14478  suballocType,
14479  memTypeIndex,
14480  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14481  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14482  finalCreateInfo.pUserData,
14483  dedicatedBuffer,
14484  dedicatedImage,
14485  allocationCount,
14486  pAllocations);
14487  if(res == VK_SUCCESS)
14488  {
14489  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14490  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14491  return VK_SUCCESS;
14492  }
14493  else
14494  {
14495  // Everything failed: Return error code.
14496  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14497  return res;
14498  }
14499  }
14500  }
14501 }
14502 
14503 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14504  VkDeviceSize size,
14505  VmaSuballocationType suballocType,
14506  uint32_t memTypeIndex,
14507  bool map,
14508  bool isUserDataString,
14509  void* pUserData,
14510  VkBuffer dedicatedBuffer,
14511  VkImage dedicatedImage,
14512  size_t allocationCount,
14513  VmaAllocation* pAllocations)
14514 {
14515  VMA_ASSERT(allocationCount > 0 && pAllocations);
14516 
14517  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14518  allocInfo.memoryTypeIndex = memTypeIndex;
14519  allocInfo.allocationSize = size;
14520 
14521 #if VMA_DEDICATED_ALLOCATION
14522  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14523  if(m_UseKhrDedicatedAllocation)
14524  {
14525  if(dedicatedBuffer != VK_NULL_HANDLE)
14526  {
14527  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14528  dedicatedAllocInfo.buffer = dedicatedBuffer;
14529  allocInfo.pNext = &dedicatedAllocInfo;
14530  }
14531  else if(dedicatedImage != VK_NULL_HANDLE)
14532  {
14533  dedicatedAllocInfo.image = dedicatedImage;
14534  allocInfo.pNext = &dedicatedAllocInfo;
14535  }
14536  }
14537 #endif // #if VMA_DEDICATED_ALLOCATION
14538 
14539  size_t allocIndex;
14540  VkResult res = VK_SUCCESS;
14541  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14542  {
14543  res = AllocateDedicatedMemoryPage(
14544  size,
14545  suballocType,
14546  memTypeIndex,
14547  allocInfo,
14548  map,
14549  isUserDataString,
14550  pUserData,
14551  pAllocations + allocIndex);
14552  if(res != VK_SUCCESS)
14553  {
14554  break;
14555  }
14556  }
14557 
14558  if(res == VK_SUCCESS)
14559  {
14560  // Register them in m_pDedicatedAllocations.
14561  {
14562  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14563  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14564  VMA_ASSERT(pDedicatedAllocations);
14565  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14566  {
14567  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14568  }
14569  }
14570 
14571  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14572  }
14573  else
14574  {
14575  // Free all already created allocations.
14576  while(allocIndex--)
14577  {
14578  VmaAllocation currAlloc = pAllocations[allocIndex];
14579  VkDeviceMemory hMemory = currAlloc->GetMemory();
14580 
14581  /*
14582  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14583  before vkFreeMemory.
14584 
14585  if(currAlloc->GetMappedData() != VMA_NULL)
14586  {
14587  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14588  }
14589  */
14590 
14591  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14592 
14593  currAlloc->SetUserData(this, VMA_NULL);
14594  currAlloc->Dtor();
14595  m_AllocationObjectAllocator.Free(currAlloc);
14596  }
14597 
14598  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14599  }
14600 
14601  return res;
14602 }
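/*
The pNext chain assembled above matches VK_KHR_dedicated_allocation. An
equivalent raw-Vulkan sketch for a dedicated buffer allocation (buffer and
memReq are assumed to exist):

    VkMemoryDedicatedAllocateInfoKHR dedicatedInfo =
        { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    dedicatedInfo.buffer = buffer;
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.pNext = &dedicatedInfo;
    allocInfo.allocationSize = memReq.size;
    allocInfo.memoryTypeIndex = memTypeIndex;
    VkDeviceMemory memory = VK_NULL_HANDLE;
    vkAllocateMemory(device, &allocInfo, VMA_NULL, &memory);
*/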
14603 
14604 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14605  VkDeviceSize size,
14606  VmaSuballocationType suballocType,
14607  uint32_t memTypeIndex,
14608  const VkMemoryAllocateInfo& allocInfo,
14609  bool map,
14610  bool isUserDataString,
14611  void* pUserData,
14612  VmaAllocation* pAllocation)
14613 {
14614  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14615  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14616  if(res < 0)
14617  {
14618  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14619  return res;
14620  }
14621 
14622  void* pMappedData = VMA_NULL;
14623  if(map)
14624  {
14625  res = (*m_VulkanFunctions.vkMapMemory)(
14626  m_hDevice,
14627  hMemory,
14628  0,
14629  VK_WHOLE_SIZE,
14630  0,
14631  &pMappedData);
14632  if(res < 0)
14633  {
14634  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14635  FreeVulkanMemory(memTypeIndex, size, hMemory);
14636  return res;
14637  }
14638  }
14639 
14640  *pAllocation = m_AllocationObjectAllocator.Allocate();
14641  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14642  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14643  (*pAllocation)->SetUserData(this, pUserData);
14644  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14645  {
14646  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14647  }
14648 
14649  return VK_SUCCESS;
14650 }
14651 
14652 void VmaAllocator_T::GetBufferMemoryRequirements(
14653  VkBuffer hBuffer,
14654  VkMemoryRequirements& memReq,
14655  bool& requiresDedicatedAllocation,
14656  bool& prefersDedicatedAllocation) const
14657 {
14658 #if VMA_DEDICATED_ALLOCATION
14659  if(m_UseKhrDedicatedAllocation)
14660  {
14661  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14662  memReqInfo.buffer = hBuffer;
14663 
14664  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14665 
14666  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14667  memReq2.pNext = &memDedicatedReq;
14668 
14669  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14670 
14671  memReq = memReq2.memoryRequirements;
14672  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14673  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14674  }
14675  else
14676 #endif // #if VMA_DEDICATED_ALLOCATION
14677  {
14678  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14679  requiresDedicatedAllocation = false;
14680  prefersDedicatedAllocation = false;
14681  }
14682 }
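/*
The KHR path above is the extension form of vkGetBufferMemoryRequirements:
VkBufferMemoryRequirementsInfo2KHR names the buffer, and chaining
VkMemoryDedicatedRequirementsKHR into VkMemoryRequirements2KHR::pNext makes
the driver report requiresDedicatedAllocation / prefersDedicatedAllocation
alongside the ordinary size, alignment, and memoryTypeBits.
*/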
14683 
14684 void VmaAllocator_T::GetImageMemoryRequirements(
14685  VkImage hImage,
14686  VkMemoryRequirements& memReq,
14687  bool& requiresDedicatedAllocation,
14688  bool& prefersDedicatedAllocation) const
14689 {
14690 #if VMA_DEDICATED_ALLOCATION
14691  if(m_UseKhrDedicatedAllocation)
14692  {
14693  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14694  memReqInfo.image = hImage;
14695 
14696  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14697 
14698  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14699  memReq2.pNext = &memDedicatedReq;
14700 
14701  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14702 
14703  memReq = memReq2.memoryRequirements;
14704  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14705  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14706  }
14707  else
14708 #endif // #if VMA_DEDICATED_ALLOCATION
14709  {
14710  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14711  requiresDedicatedAllocation = false;
14712  prefersDedicatedAllocation = false;
14713  }
14714 }
14715 
14716 VkResult VmaAllocator_T::AllocateMemory(
14717  const VkMemoryRequirements& vkMemReq,
14718  bool requiresDedicatedAllocation,
14719  bool prefersDedicatedAllocation,
14720  VkBuffer dedicatedBuffer,
14721  VkImage dedicatedImage,
14722  const VmaAllocationCreateInfo& createInfo,
14723  VmaSuballocationType suballocType,
14724  size_t allocationCount,
14725  VmaAllocation* pAllocations)
14726 {
14727  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14728 
14729  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14730 
14731  if(vkMemReq.size == 0)
14732  {
14733  return VK_ERROR_VALIDATION_FAILED_EXT;
14734  }
14735  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14736  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14737  {
14738  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14739  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14740  }
14741  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14742  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14743  {
14744  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14745  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14746  }
14747  if(requiresDedicatedAllocation)
14748  {
14749  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14750  {
14751  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14752  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14753  }
14754  if(createInfo.pool != VK_NULL_HANDLE)
14755  {
14756  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14757  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14758  }
14759  }
14760  if((createInfo.pool != VK_NULL_HANDLE) &&
14761  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14762  {
14763  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14764  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14765  }
14766 
14767  if(createInfo.pool != VK_NULL_HANDLE)
14768  {
14769  const VkDeviceSize alignmentForPool = VMA_MAX(
14770  vkMemReq.alignment,
14771  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14772  return createInfo.pool->m_BlockVector.Allocate(
14773  m_CurrentFrameIndex.load(),
14774  vkMemReq.size,
14775  alignmentForPool,
14776  createInfo,
14777  suballocType,
14778  allocationCount,
14779  pAllocations);
14780  }
14781  else
14782  {
14783  // Bit mask of Vulkan memory types acceptable for this allocation.
14784  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14785  uint32_t memTypeIndex = UINT32_MAX;
14786  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14787  if(res == VK_SUCCESS)
14788  {
14789  VkDeviceSize alignmentForMemType = VMA_MAX(
14790  vkMemReq.alignment,
14791  GetMemoryTypeMinAlignment(memTypeIndex));
14792 
14793  res = AllocateMemoryOfType(
14794  vkMemReq.size,
14795  alignmentForMemType,
14796  requiresDedicatedAllocation || prefersDedicatedAllocation,
14797  dedicatedBuffer,
14798  dedicatedImage,
14799  createInfo,
14800  memTypeIndex,
14801  suballocType,
14802  allocationCount,
14803  pAllocations);
14804  // Succeeded on first try.
14805  if(res == VK_SUCCESS)
14806  {
14807  return res;
14808  }
14809  // Allocation from this memory type failed. Try other compatible memory types.
14810  else
14811  {
14812  for(;;)
14813  {
14814  // Remove old memTypeIndex from list of possibilities.
14815  memoryTypeBits &= ~(1u << memTypeIndex);
14816  // Find alternative memTypeIndex.
14817  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14818  if(res == VK_SUCCESS)
14819  {
14820  alignmentForMemType = VMA_MAX(
14821  vkMemReq.alignment,
14822  GetMemoryTypeMinAlignment(memTypeIndex));
14823 
14824  res = AllocateMemoryOfType(
14825  vkMemReq.size,
14826  alignmentForMemType,
14827  requiresDedicatedAllocation || prefersDedicatedAllocation,
14828  dedicatedBuffer,
14829  dedicatedImage,
14830  createInfo,
14831  memTypeIndex,
14832  suballocType,
14833  allocationCount,
14834  pAllocations);
14835  // Allocation from this alternative memory type succeeded.
14836  if(res == VK_SUCCESS)
14837  {
14838  return res;
14839  }
14840  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14841  }
14842  // No other matching memory type index could be found.
14843  else
14844  {
14845  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14846  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14847  }
14848  }
14849  }
14850  }
14851  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14852  else
14853  return res;
14854  }
14855 }
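/*
Worked example of the fallback loop above: with vkMemReq.memoryTypeBits =
0b1101 and a failed attempt at memTypeIndex = 0, the mask becomes
0b1101 & ~(1u << 0) = 0b1100, and vmaFindMemoryTypeIndex() is queried again;
this repeats until an allocation succeeds or no acceptable bits remain, in
which case VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.
*/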
14856 
14857 void VmaAllocator_T::FreeMemory(
14858  size_t allocationCount,
14859  const VmaAllocation* pAllocations)
14860 {
14861  VMA_ASSERT(pAllocations);
14862 
14863  for(size_t allocIndex = allocationCount; allocIndex--; )
14864  {
14865  VmaAllocation allocation = pAllocations[allocIndex];
14866 
14867  if(allocation != VK_NULL_HANDLE)
14868  {
14869  if(TouchAllocation(allocation))
14870  {
14871  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14872  {
14873  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14874  }
14875 
14876  switch(allocation->GetType())
14877  {
14878  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14879  {
14880  VmaBlockVector* pBlockVector = VMA_NULL;
14881  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14882  if(hPool != VK_NULL_HANDLE)
14883  {
14884  pBlockVector = &hPool->m_BlockVector;
14885  }
14886  else
14887  {
14888  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14889  pBlockVector = m_pBlockVectors[memTypeIndex];
14890  }
14891  pBlockVector->Free(allocation);
14892  }
14893  break;
14894  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14895  FreeDedicatedMemory(allocation);
14896  break;
14897  default:
14898  VMA_ASSERT(0);
14899  }
14900  }
14901 
14902  allocation->SetUserData(this, VMA_NULL);
14903  allocation->Dtor();
14904  m_AllocationObjectAllocator.Free(allocation);
14905  }
14906  }
14907 }
14908 
14909 VkResult VmaAllocator_T::ResizeAllocation(
14910  const VmaAllocation alloc,
14911  VkDeviceSize newSize)
14912 {
14913  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14914  {
14915  return VK_ERROR_VALIDATION_FAILED_EXT;
14916  }
14917  if(newSize == alloc->GetSize())
14918  {
14919  return VK_SUCCESS;
14920  }
14921 
14922  switch(alloc->GetType())
14923  {
14924  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14925  return VK_ERROR_FEATURE_NOT_PRESENT;
14926  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14927  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14928  {
14929  alloc->ChangeSize(newSize);
14930  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14931  return VK_SUCCESS;
14932  }
14933  else
14934  {
14935  return VK_ERROR_OUT_OF_POOL_MEMORY;
14936  }
14937  default:
14938  VMA_ASSERT(0);
14939  return VK_ERROR_VALIDATION_FAILED_EXT;
14940  }
14941 }
14942 
14943 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14944 {
14945  // Initialize.
14946  InitStatInfo(pStats->total);
14947  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14948  InitStatInfo(pStats->memoryType[i]);
14949  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14950  InitStatInfo(pStats->memoryHeap[i]);
14951 
14952  // Process default pools.
14953  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14954  {
14955  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14956  VMA_ASSERT(pBlockVector);
14957  pBlockVector->AddStats(pStats);
14958  }
14959 
14960  // Process custom pools.
14961  {
14962  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14963  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14964  {
14965  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14966  }
14967  }
14968 
14969  // Process dedicated allocations.
14970  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14971  {
14972  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14973  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14974  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14975  VMA_ASSERT(pDedicatedAllocVector);
14976  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14977  {
14978  VmaStatInfo allocationStatInfo;
14979  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14980  VmaAddStatInfo(pStats->total, allocationStatInfo);
14981  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14982  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14983  }
14984  }
14985 
14986  // Postprocess.
14987  VmaPostprocessCalcStatInfo(pStats->total);
14988  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14989  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14990  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14991  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14992 }
14993 
14994 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14995 
14996 VkResult VmaAllocator_T::DefragmentationBegin(
14997  const VmaDefragmentationInfo2& info,
14998  VmaDefragmentationStats* pStats,
14999  VmaDefragmentationContext* pContext)
15000 {
15001  if(info.pAllocationsChanged != VMA_NULL)
15002  {
15003  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15004  }
15005 
15006  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15007  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15008 
15009  (*pContext)->AddPools(info.poolCount, info.pPools);
15010  (*pContext)->AddAllocations(
15011  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15012 
15013  VkResult res = (*pContext)->Defragment(
15014  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15015  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15016  info.commandBuffer, pStats);
15017 
15018  if(res != VK_NOT_READY)
15019  {
15020  vma_delete(this, *pContext);
15021  *pContext = VMA_NULL;
15022  }
15023 
15024  return res;
15025 }
15026 
15027 VkResult VmaAllocator_T::DefragmentationEnd(
15028  VmaDefragmentationContext context)
15029 {
15030  vma_delete(this, context);
15031  return VK_SUCCESS;
15032 }
15033 
15034 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15035 {
15036  if(hAllocation->CanBecomeLost())
15037  {
15038  /*
15039  Warning: This is a carefully designed algorithm.
15040  Do not modify unless you really know what you're doing :)
15041  */
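 /*
 What the loop below does: it atomically publishes the current frame index as
 the allocation's last-use time. Each iteration either observes a terminal
 state (the allocation is lost, or its last use already equals the current
 frame) and returns, or advances localLastUseFrameIndex via compare-exchange,
 so the loop terminates and concurrent callers agree on whether the
 allocation is still alive.
 */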
15042  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15043  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15044  for(;;)
15045  {
15046  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15047  {
15048  pAllocationInfo->memoryType = UINT32_MAX;
15049  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15050  pAllocationInfo->offset = 0;
15051  pAllocationInfo->size = hAllocation->GetSize();
15052  pAllocationInfo->pMappedData = VMA_NULL;
15053  pAllocationInfo->pUserData = hAllocation->GetUserData();
15054  return;
15055  }
15056  else if(localLastUseFrameIndex == localCurrFrameIndex)
15057  {
15058  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15059  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15060  pAllocationInfo->offset = hAllocation->GetOffset();
15061  pAllocationInfo->size = hAllocation->GetSize();
15062  pAllocationInfo->pMappedData = VMA_NULL;
15063  pAllocationInfo->pUserData = hAllocation->GetUserData();
15064  return;
15065  }
15066  else // Last use time earlier than current time.
15067  {
15068  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15069  {
15070  localLastUseFrameIndex = localCurrFrameIndex;
15071  }
15072  }
15073  }
15074  }
15075  else
15076  {
15077 #if VMA_STATS_STRING_ENABLED
15078  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15079  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15080  for(;;)
15081  {
15082  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15083  if(localLastUseFrameIndex == localCurrFrameIndex)
15084  {
15085  break;
15086  }
15087  else // Last use time earlier than current time.
15088  {
15089  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15090  {
15091  localLastUseFrameIndex = localCurrFrameIndex;
15092  }
15093  }
15094  }
15095 #endif
15096 
15097  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15098  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15099  pAllocationInfo->offset = hAllocation->GetOffset();
15100  pAllocationInfo->size = hAllocation->GetSize();
15101  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15102  pAllocationInfo->pUserData = hAllocation->GetUserData();
15103  }
15104 }
15105 
15106 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15107 {
15108  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15109  if(hAllocation->CanBecomeLost())
15110  {
15111  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15112  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15113  for(;;)
15114  {
15115  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15116  {
15117  return false;
15118  }
15119  else if(localLastUseFrameIndex == localCurrFrameIndex)
15120  {
15121  return true;
15122  }
15123  else // Last use time earlier than current time.
15124  {
15125  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15126  {
15127  localLastUseFrameIndex = localCurrFrameIndex;
15128  }
15129  }
15130  }
15131  }
15132  else
15133  {
15134 #if VMA_STATS_STRING_ENABLED
15135  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15136  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15137  for(;;)
15138  {
15139  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15140  if(localLastUseFrameIndex == localCurrFrameIndex)
15141  {
15142  break;
15143  }
15144  else // Last use time earlier than current time.
15145  {
15146  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15147  {
15148  localLastUseFrameIndex = localCurrFrameIndex;
15149  }
15150  }
15151  }
15152 #endif
15153 
15154  return true;
15155  }
15156 }
15157 
15158 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15159 {
15160  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15161 
15162  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15163 
15164  if(newCreateInfo.maxBlockCount == 0)
15165  {
15166  newCreateInfo.maxBlockCount = SIZE_MAX;
15167  }
15168  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15169  {
15170  return VK_ERROR_INITIALIZATION_FAILED;
15171  }
15172 
15173  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15174 
15175  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15176 
15177  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15178  if(res != VK_SUCCESS)
15179  {
15180  vma_delete(this, *pPool);
15181  *pPool = VMA_NULL;
15182  return res;
15183  }
15184 
15185  // Add to m_Pools.
15186  {
15187  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15188  (*pPool)->SetId(m_NextPoolId++);
15189  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15190  }
15191 
15192  return VK_SUCCESS;
15193 }
15194 
15195 void VmaAllocator_T::DestroyPool(VmaPool pool)
15196 {
15197  // Remove from m_Pools.
15198  {
15199  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15200  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15201  VMA_ASSERT(success && "Pool not found in Allocator.");
15202  }
15203 
15204  vma_delete(this, pool);
15205 }
15206 
15207 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15208 {
15209  pool->m_BlockVector.GetPoolStats(pPoolStats);
15210 }
15211 
15212 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15213 {
15214  m_CurrentFrameIndex.store(frameIndex);
15215 }
15216 
15217 void VmaAllocator_T::MakePoolAllocationsLost(
15218  VmaPool hPool,
15219  size_t* pLostAllocationCount)
15220 {
15221  hPool->m_BlockVector.MakePoolAllocationsLost(
15222  m_CurrentFrameIndex.load(),
15223  pLostAllocationCount);
15224 }
15225 
15226 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15227 {
15228  return hPool->m_BlockVector.CheckCorruption();
15229 }
15230 
15231 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15232 {
15233  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15234 
15235  // Process default pools.
15236  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15237  {
15238  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15239  {
15240  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15241  VMA_ASSERT(pBlockVector);
15242  VkResult localRes = pBlockVector->CheckCorruption();
15243  switch(localRes)
15244  {
15245  case VK_ERROR_FEATURE_NOT_PRESENT:
15246  break;
15247  case VK_SUCCESS:
15248  finalRes = VK_SUCCESS;
15249  break;
15250  default:
15251  return localRes;
15252  }
15253  }
15254  }
15255 
15256  // Process custom pools.
15257  {
15258  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15259  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15260  {
15261  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15262  {
15263  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15264  switch(localRes)
15265  {
15266  case VK_ERROR_FEATURE_NOT_PRESENT:
15267  break;
15268  case VK_SUCCESS:
15269  finalRes = VK_SUCCESS;
15270  break;
15271  default:
15272  return localRes;
15273  }
15274  }
15275  }
15276  }
15277 
15278  return finalRes;
15279 }
15280 
15281 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15282 {
15283  *pAllocation = m_AllocationObjectAllocator.Allocate();
15284  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15285  (*pAllocation)->InitLost();
15286 }
15287 
15288 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15289 {
15290  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15291 
15292  VkResult res;
15293  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15294  {
15295  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15296  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15297  {
15298  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15299  if(res == VK_SUCCESS)
15300  {
15301  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15302  }
15303  }
15304  else
15305  {
15306  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15307  }
15308  }
15309  else
15310  {
15311  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15312  }
15313 
15314  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15315  {
15316  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15317  }
15318 
15319  return res;
15320 }
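/*
Note on the bookkeeping above: m_HeapSizeLimit implements the optional
per-heap budget from VmaAllocatorCreateInfo::pHeapSizeLimit. When a heap has
a limit (value != VK_WHOLE_SIZE), AllocateVulkanMemory() reserves the
requested size under m_HeapSizeLimitMutex before calling vkAllocateMemory,
and FreeVulkanMemory() below returns it, so the total of live allocations in
that heap never exceeds the configured limit.
*/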
15321 
15322 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15323 {
15324  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15325  {
15326  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15327  }
15328 
15329  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15330 
15331  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15332  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15333  {
15334  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15335  m_HeapSizeLimit[heapIndex] += size;
15336  }
15337 }
15338 
15339 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15340 {
15341  if(hAllocation->CanBecomeLost())
15342  {
15343  return VK_ERROR_MEMORY_MAP_FAILED;
15344  }
15345 
15346  switch(hAllocation->GetType())
15347  {
15348  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15349  {
15350  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15351  char *pBytes = VMA_NULL;
15352  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15353  if(res == VK_SUCCESS)
15354  {
15355  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15356  hAllocation->BlockAllocMap();
15357  }
15358  return res;
15359  }
15360  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15361  return hAllocation->DedicatedAllocMap(this, ppData);
15362  default:
15363  VMA_ASSERT(0);
15364  return VK_ERROR_MEMORY_MAP_FAILED;
15365  }
15366 }
15367 
15368 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15369 {
15370  switch(hAllocation->GetType())
15371  {
15372  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15373  {
15374  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15375  hAllocation->BlockAllocUnmap();
15376  pBlock->Unmap(this, 1);
15377  }
15378  break;
15379  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15380  hAllocation->DedicatedAllocUnmap(this);
15381  break;
15382  default:
15383  VMA_ASSERT(0);
15384  }
15385 }
15386 
15387 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15388 {
15389  VkResult res = VK_SUCCESS;
15390  switch(hAllocation->GetType())
15391  {
15392  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15393  res = GetVulkanFunctions().vkBindBufferMemory(
15394  m_hDevice,
15395  hBuffer,
15396  hAllocation->GetMemory(),
15397  0); //memoryOffset
15398  break;
15399  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15400  {
15401  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15402  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15403  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15404  break;
15405  }
15406  default:
15407  VMA_ASSERT(0);
15408  }
15409  return res;
15410 }
15411 
15412 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15413 {
15414  VkResult res = VK_SUCCESS;
15415  switch(hAllocation->GetType())
15416  {
15417  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15418  res = GetVulkanFunctions().vkBindImageMemory(
15419  m_hDevice,
15420  hImage,
15421  hAllocation->GetMemory(),
15422  0); //memoryOffset
15423  break;
15424  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15425  {
15426  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15427  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15428  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15429  break;
15430  }
15431  default:
15432  VMA_ASSERT(0);
15433  }
15434  return res;
15435 }
15436 
15437 void VmaAllocator_T::FlushOrInvalidateAllocation(
15438  VmaAllocation hAllocation,
15439  VkDeviceSize offset, VkDeviceSize size,
15440  VMA_CACHE_OPERATION op)
15441 {
15442  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15443  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15444  {
15445  const VkDeviceSize allocationSize = hAllocation->GetSize();
15446  VMA_ASSERT(offset <= allocationSize);
15447 
15448  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15449 
15450  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15451  memRange.memory = hAllocation->GetMemory();
15452 
15453  switch(hAllocation->GetType())
15454  {
15455  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15456  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15457  if(size == VK_WHOLE_SIZE)
15458  {
15459  memRange.size = allocationSize - memRange.offset;
15460  }
15461  else
15462  {
15463  VMA_ASSERT(offset + size <= allocationSize);
15464  memRange.size = VMA_MIN(
15465  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15466  allocationSize - memRange.offset);
15467  }
15468  break;
15469 
15470  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15471  {
15472  // 1. Still within this allocation.
15473  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15474  if(size == VK_WHOLE_SIZE)
15475  {
15476  size = allocationSize - offset;
15477  }
15478  else
15479  {
15480  VMA_ASSERT(offset + size <= allocationSize);
15481  }
15482  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15483 
15484  // 2. Adjust to whole block.
15485  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15486  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15487  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15488  memRange.offset += allocationOffset;
15489  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15490 
15491  break;
15492  }
15493 
15494  default:
15495  VMA_ASSERT(0);
15496  }
15497 
15498  switch(op)
15499  {
15500  case VMA_CACHE_FLUSH:
15501  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15502  break;
15503  case VMA_CACHE_INVALIDATE:
15504  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15505  break;
15506  default:
15507  VMA_ASSERT(0);
15508  }
15509  }
15510  // else: Just ignore this call.
15511 }
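/*
A worked example of the rounding above (illustrative numbers): with
nonCoherentAtomSize = 64, a request of offset = 100, size = 200 yields
memRange.offset = VmaAlignDown(100, 64) = 64 and
memRange.size = VmaAlignUp(200 + (100 - 64), 64) = 256, which is then clamped
so the range never extends past the end of the allocation or its block.
*/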
15512 
15513 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15514 {
15515  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15516 
15517  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15518  {
15519  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15520  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15521  VMA_ASSERT(pDedicatedAllocations);
15522  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15523  VMA_ASSERT(success);
15524  }
15525 
15526  VkDeviceMemory hMemory = allocation->GetMemory();
15527 
15528  /*
15529  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15530  before vkFreeMemory.
15531 
15532  if(allocation->GetMappedData() != VMA_NULL)
15533  {
15534  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15535  }
15536  */
15537 
15538  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15539 
15540  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15541 }
15542 
15543 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15544 {
15545  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15546  !hAllocation->CanBecomeLost() &&
15547  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15548  {
15549  void* pData = VMA_NULL;
15550  VkResult res = Map(hAllocation, &pData);
15551  if(res == VK_SUCCESS)
15552  {
15553  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15554  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15555  Unmap(hAllocation);
15556  }
15557  else
15558  {
15559  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15560  }
15561  }
15562 }
15563 
15564 #if VMA_STATS_STRING_ENABLED
15565 
15566 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15567 {
15568  bool dedicatedAllocationsStarted = false;
15569  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15570  {
15571  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15572  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15573  VMA_ASSERT(pDedicatedAllocVector);
15574  if(pDedicatedAllocVector->empty() == false)
15575  {
15576  if(dedicatedAllocationsStarted == false)
15577  {
15578  dedicatedAllocationsStarted = true;
15579  json.WriteString("DedicatedAllocations");
15580  json.BeginObject();
15581  }
15582 
15583  json.BeginString("Type ");
15584  json.ContinueString(memTypeIndex);
15585  json.EndString();
15586 
15587  json.BeginArray();
15588 
15589  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15590  {
15591  json.BeginObject(true);
15592  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15593  hAlloc->PrintParameters(json);
15594  json.EndObject();
15595  }
15596 
15597  json.EndArray();
15598  }
15599  }
15600  if(dedicatedAllocationsStarted)
15601  {
15602  json.EndObject();
15603  }
15604 
15605  {
15606  bool allocationsStarted = false;
15607  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15608  {
15609  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15610  {
15611  if(allocationsStarted == false)
15612  {
15613  allocationsStarted = true;
15614  json.WriteString("DefaultPools");
15615  json.BeginObject();
15616  }
15617 
15618  json.BeginString("Type ");
15619  json.ContinueString(memTypeIndex);
15620  json.EndString();
15621 
15622  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15623  }
15624  }
15625  if(allocationsStarted)
15626  {
15627  json.EndObject();
15628  }
15629  }
15630 
15631  // Custom pools
15632  {
15633  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15634  const size_t poolCount = m_Pools.size();
15635  if(poolCount > 0)
15636  {
15637  json.WriteString("Pools");
15638  json.BeginObject();
15639  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15640  {
15641  json.BeginString();
15642  json.ContinueString(m_Pools[poolIndex]->GetId());
15643  json.EndString();
15644 
15645  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15646  }
15647  json.EndObject();
15648  }
15649  }
15650 }
15651 
15652 #endif // #if VMA_STATS_STRING_ENABLED
15653 
15655 // Public interface
15656 
15657 VkResult vmaCreateAllocator(
15658  const VmaAllocatorCreateInfo* pCreateInfo,
15659  VmaAllocator* pAllocator)
15660 {
15661  VMA_ASSERT(pCreateInfo && pAllocator);
15662  VMA_DEBUG_LOG("vmaCreateAllocator");
15663  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15664  return (*pAllocator)->Init(pCreateInfo);
15665 }
15666 
15667 void vmaDestroyAllocator(
15668  VmaAllocator allocator)
15669 {
15670  if(allocator != VK_NULL_HANDLE)
15671  {
15672  VMA_DEBUG_LOG("vmaDestroyAllocator");
15673  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15674  vma_delete(&allocationCallbacks, allocator);
15675  }
15676 }
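/*
Example usage (illustrative sketch, not part of the library; assumes
`physicalDevice` and `device` were created by the application):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create resources through the allocator ...
    vmaDestroyAllocator(allocator);
*/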
15677 
15678 void vmaGetPhysicalDeviceProperties(
15679  VmaAllocator allocator,
15680  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15681 {
15682  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15683  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15684 }
15685 
15686 void vmaGetMemoryProperties(
15687  VmaAllocator allocator,
15688  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15689 {
15690  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15691  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15692 }
15693 
15694 void vmaGetMemoryTypeProperties(
15695  VmaAllocator allocator,
15696  uint32_t memoryTypeIndex,
15697  VkMemoryPropertyFlags* pFlags)
15698 {
15699  VMA_ASSERT(allocator && pFlags);
15700  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15701  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15702 }
15703 
15704 void vmaSetCurrentFrameIndex(
15705  VmaAllocator allocator,
15706  uint32_t frameIndex)
15707 {
15708  VMA_ASSERT(allocator);
15709  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15710 
15711  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15712 
15713  allocator->SetCurrentFrameIndex(frameIndex);
15714 }
15715 
15716 void vmaCalculateStats(
15717  VmaAllocator allocator,
15718  VmaStats* pStats)
15719 {
15720  VMA_ASSERT(allocator && pStats);
15721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15722  allocator->CalculateStats(pStats);
15723 }
15724 
15725 #if VMA_STATS_STRING_ENABLED
15726 
15727 void vmaBuildStatsString(
15728  VmaAllocator allocator,
15729  char** ppStatsString,
15730  VkBool32 detailedMap)
15731 {
15732  VMA_ASSERT(allocator && ppStatsString);
15733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15734 
15735  VmaStringBuilder sb(allocator);
15736  {
15737  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15738  json.BeginObject();
15739 
15740  VmaStats stats;
15741  allocator->CalculateStats(&stats);
15742 
15743  json.WriteString("Total");
15744  VmaPrintStatInfo(json, stats.total);
15745 
15746  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15747  {
15748  json.BeginString("Heap ");
15749  json.ContinueString(heapIndex);
15750  json.EndString();
15751  json.BeginObject();
15752 
15753  json.WriteString("Size");
15754  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15755 
15756  json.WriteString("Flags");
15757  json.BeginArray(true);
15758  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15759  {
15760  json.WriteString("DEVICE_LOCAL");
15761  }
15762  json.EndArray();
15763 
15764  if(stats.memoryHeap[heapIndex].blockCount > 0)
15765  {
15766  json.WriteString("Stats");
15767  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15768  }
15769 
15770  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15771  {
15772  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15773  {
15774  json.BeginString("Type ");
15775  json.ContinueString(typeIndex);
15776  json.EndString();
15777 
15778  json.BeginObject();
15779 
15780  json.WriteString("Flags");
15781  json.BeginArray(true);
15782  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15783  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15784  {
15785  json.WriteString("DEVICE_LOCAL");
15786  }
15787  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15788  {
15789  json.WriteString("HOST_VISIBLE");
15790  }
15791  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15792  {
15793  json.WriteString("HOST_COHERENT");
15794  }
15795  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15796  {
15797  json.WriteString("HOST_CACHED");
15798  }
15799  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15800  {
15801  json.WriteString("LAZILY_ALLOCATED");
15802  }
15803  json.EndArray();
15804 
15805  if(stats.memoryType[typeIndex].blockCount > 0)
15806  {
15807  json.WriteString("Stats");
15808  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15809  }
15810 
15811  json.EndObject();
15812  }
15813  }
15814 
15815  json.EndObject();
15816  }
15817  if(detailedMap == VK_TRUE)
15818  {
15819  allocator->PrintDetailedMap(json);
15820  }
15821 
15822  json.EndObject();
15823  }
15824 
15825  const size_t len = sb.GetLength();
15826  char* const pChars = vma_new_array(allocator, char, len + 1);
15827  if(len > 0)
15828  {
15829  memcpy(pChars, sb.GetData(), len);
15830  }
15831  pChars[len] = '\0';
15832  *ppStatsString = pChars;
15833 }
15834 
15835 void vmaFreeStatsString(
15836  VmaAllocator allocator,
15837  char* pStatsString)
15838 {
15839  if(pStatsString != VMA_NULL)
15840  {
15841  VMA_ASSERT(allocator);
15842  size_t len = strlen(pStatsString);
15843  vma_delete_array(allocator, pStatsString, len + 1);
15844  }
15845 }
15846 
15847 #endif // #if VMA_STATS_STRING_ENABLED
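/*
Example usage (illustrative sketch): every string returned by
vmaBuildStatsString() must be released with vmaFreeStatsString().

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/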
15848 
15849 /*
15850 This function is not protected by any mutex because it just reads immutable data.
15851 */
15852 VkResult vmaFindMemoryTypeIndex(
15853  VmaAllocator allocator,
15854  uint32_t memoryTypeBits,
15855  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15856  uint32_t* pMemoryTypeIndex)
15857 {
15858  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15859  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15860  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15861 
15862  if(pAllocationCreateInfo->memoryTypeBits != 0)
15863  {
15864  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15865  }
15866 
15867  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15868  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15869 
15870  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15871  if(mapped)
15872  {
15873  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15874  }
15875 
15876  // Convert usage to requiredFlags and preferredFlags.
15877  switch(pAllocationCreateInfo->usage)
15878  {
15879  case VMA_MEMORY_USAGE_UNKNOWN:
15880  break;
15881  case VMA_MEMORY_USAGE_GPU_ONLY:
15882  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15883  {
15884  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15885  }
15886  break;
15887  case VMA_MEMORY_USAGE_CPU_ONLY:
15888  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15889  break;
15890  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15891  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15892  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15893  {
15894  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15895  }
15896  break;
15897  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15898  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15899  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15900  break;
15901  default:
15902  break;
15903  }
15904 
15905  *pMemoryTypeIndex = UINT32_MAX;
15906  uint32_t minCost = UINT32_MAX;
15907  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15908  memTypeIndex < allocator->GetMemoryTypeCount();
15909  ++memTypeIndex, memTypeBit <<= 1)
15910  {
15911  // This memory type is acceptable according to memoryTypeBits bitmask.
15912  if((memTypeBit & memoryTypeBits) != 0)
15913  {
15914  const VkMemoryPropertyFlags currFlags =
15915  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15916  // This memory type contains requiredFlags.
15917  if((requiredFlags & ~currFlags) == 0)
15918  {
15919  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15920  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15921  // Remember memory type with lowest cost.
15922  if(currCost < minCost)
15923  {
15924  *pMemoryTypeIndex = memTypeIndex;
15925  if(currCost == 0)
15926  {
15927  return VK_SUCCESS;
15928  }
15929  minCost = currCost;
15930  }
15931  }
15932  }
15933  }
15934  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15935 }
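/*
Example usage (illustrative sketch; `memoryTypeBits` would normally come from
vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // host-visible staging memory

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // Returns VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.
*/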
15936 
15937 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15938  VmaAllocator allocator,
15939  const VkBufferCreateInfo* pBufferCreateInfo,
15940  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15941  uint32_t* pMemoryTypeIndex)
15942 {
15943  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15944  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15945  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15946  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15947 
15948  const VkDevice hDev = allocator->m_hDevice;
15949  VkBuffer hBuffer = VK_NULL_HANDLE;
15950  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15951  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15952  if(res == VK_SUCCESS)
15953  {
15954  VkMemoryRequirements memReq = {};
15955  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15956  hDev, hBuffer, &memReq);
15957 
15958  res = vmaFindMemoryTypeIndex(
15959  allocator,
15960  memReq.memoryTypeBits,
15961  pAllocationCreateInfo,
15962  pMemoryTypeIndex);
15963 
15964  allocator->GetVulkanFunctions().vkDestroyBuffer(
15965  hDev, hBuffer, allocator->GetAllocationCallbacks());
15966  }
15967  return res;
15968 }
15969 
15970 VkResult vmaFindMemoryTypeIndexForImageInfo(
15971  VmaAllocator allocator,
15972  const VkImageCreateInfo* pImageCreateInfo,
15973  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15974  uint32_t* pMemoryTypeIndex)
15975 {
15976  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15977  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15978  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15979  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15980 
15981  const VkDevice hDev = allocator->m_hDevice;
15982  VkImage hImage = VK_NULL_HANDLE;
15983  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15984  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15985  if(res == VK_SUCCESS)
15986  {
15987  VkMemoryRequirements memReq = {};
15988  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15989  hDev, hImage, &memReq);
15990 
15991  res = vmaFindMemoryTypeIndex(
15992  allocator,
15993  memReq.memoryTypeBits,
15994  pAllocationCreateInfo,
15995  pMemoryTypeIndex);
15996 
15997  allocator->GetVulkanFunctions().vkDestroyImage(
15998  hDev, hImage, allocator->GetAllocationCallbacks());
15999  }
16000  return res;
16001 }
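/*
Note: both functions above create a temporary dummy buffer/image solely to
query vkGetBufferMemoryRequirements / vkGetImageMemoryRequirements, then
destroy it. This is the portable way to learn memoryTypeBits for a resource
description without keeping the resource alive.
*/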
16002 
16003 VkResult vmaCreatePool(
16004  VmaAllocator allocator,
16005  const VmaPoolCreateInfo* pCreateInfo,
16006  VmaPool* pPool)
16007 {
16008  VMA_ASSERT(allocator && pCreateInfo && pPool);
16009 
16010  VMA_DEBUG_LOG("vmaCreatePool");
16011 
16012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16013 
16014  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16015 
16016 #if VMA_RECORDING_ENABLED
16017  if(allocator->GetRecorder() != VMA_NULL)
16018  {
16019  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16020  }
16021 #endif
16022 
16023  return res;
16024 }
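/*
Example usage (illustrative sketch; `memTypeIndex` previously obtained from
vmaFindMemoryTypeIndex or one of its variants):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // optional: fixed 64 MiB blocks
    poolCreateInfo.maxBlockCount = 2; // optional: cap the pool at 2 blocks

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocations then opt in via VmaAllocationCreateInfo::pool = pool.
*/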
16025 
16026 void vmaDestroyPool(
16027  VmaAllocator allocator,
16028  VmaPool pool)
16029 {
16030  VMA_ASSERT(allocator);
16031 
16032  if(pool == VK_NULL_HANDLE)
16033  {
16034  return;
16035  }
16036 
16037  VMA_DEBUG_LOG("vmaDestroyPool");
16038 
16039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16040 
16041 #if VMA_RECORDING_ENABLED
16042  if(allocator->GetRecorder() != VMA_NULL)
16043  {
16044  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16045  }
16046 #endif
16047 
16048  allocator->DestroyPool(pool);
16049 }
16050 
16051 void vmaGetPoolStats(
16052  VmaAllocator allocator,
16053  VmaPool pool,
16054  VmaPoolStats* pPoolStats)
16055 {
16056  VMA_ASSERT(allocator && pool && pPoolStats);
16057 
16058  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16059 
16060  allocator->GetPoolStats(pool, pPoolStats);
16061 }
16062 
16063 void vmaMakePoolAllocationsLost(
16064  VmaAllocator allocator,
16065  VmaPool pool,
16066  size_t* pLostAllocationCount)
16067 {
16068  VMA_ASSERT(allocator && pool);
16069 
16070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16071 
16072 #if VMA_RECORDING_ENABLED
16073  if(allocator->GetRecorder() != VMA_NULL)
16074  {
16075  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16076  }
16077 #endif
16078 
16079  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16080 }
16081 
16082 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16083 {
16084  VMA_ASSERT(allocator && pool);
16085 
16086  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16087 
16088  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16089 
16090  return allocator->CheckPoolCorruption(pool);
16091 }
16092 
16093 VkResult vmaAllocateMemory(
16094  VmaAllocator allocator,
16095  const VkMemoryRequirements* pVkMemoryRequirements,
16096  const VmaAllocationCreateInfo* pCreateInfo,
16097  VmaAllocation* pAllocation,
16098  VmaAllocationInfo* pAllocationInfo)
16099 {
16100  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16101 
16102  VMA_DEBUG_LOG("vmaAllocateMemory");
16103 
16104  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16105 
16106  VkResult result = allocator->AllocateMemory(
16107  *pVkMemoryRequirements,
16108  false, // requiresDedicatedAllocation
16109  false, // prefersDedicatedAllocation
16110  VK_NULL_HANDLE, // dedicatedBuffer
16111  VK_NULL_HANDLE, // dedicatedImage
16112  *pCreateInfo,
16113  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16114  1, // allocationCount
16115  pAllocation);
16116 
16117 #if VMA_RECORDING_ENABLED
16118  if(allocator->GetRecorder() != VMA_NULL)
16119  {
16120  allocator->GetRecorder()->RecordAllocateMemory(
16121  allocator->GetCurrentFrameIndex(),
16122  *pVkMemoryRequirements,
16123  *pCreateInfo,
16124  *pAllocation);
16125  }
16126 #endif
16127 
16128  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16129  {
16130  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16131  }
16132 
16133  return result;
16134 }
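/*
Example usage (illustrative sketch) for allocating raw memory from
requirements queried by the application itself:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, VMA_NULL);
    // Bind afterwards with vmaBindBufferMemory(allocator, allocation, buffer).
*/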
16135 
16136 VkResult vmaAllocateMemoryPages(
16137  VmaAllocator allocator,
16138  const VkMemoryRequirements* pVkMemoryRequirements,
16139  const VmaAllocationCreateInfo* pCreateInfo,
16140  size_t allocationCount,
16141  VmaAllocation* pAllocations,
16142  VmaAllocationInfo* pAllocationInfo)
16143 {
16144  if(allocationCount == 0)
16145  {
16146  return VK_SUCCESS;
16147  }
16148 
16149  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16150 
16151  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16152 
16153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16154 
16155  VkResult result = allocator->AllocateMemory(
16156  *pVkMemoryRequirements,
16157  false, // requiresDedicatedAllocation
16158  false, // prefersDedicatedAllocation
16159  VK_NULL_HANDLE, // dedicatedBuffer
16160  VK_NULL_HANDLE, // dedicatedImage
16161  *pCreateInfo,
16162  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16163  allocationCount,
16164  pAllocations);
16165 
16166 #if VMA_RECORDING_ENABLED
16167  if(allocator->GetRecorder() != VMA_NULL)
16168  {
16169  allocator->GetRecorder()->RecordAllocateMemoryPages(
16170  allocator->GetCurrentFrameIndex(),
16171  *pVkMemoryRequirements,
16172  *pCreateInfo,
16173  (uint64_t)allocationCount,
16174  pAllocations);
16175  }
16176 #endif
16177 
16178  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16179  {
16180  for(size_t i = 0; i < allocationCount; ++i)
16181  {
16182  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16183  }
16184  }
16185 
16186  return result;
16187 }
16188 
16189 VkResult vmaAllocateMemoryForBuffer(
16190  VmaAllocator allocator,
16191  VkBuffer buffer,
16192  const VmaAllocationCreateInfo* pCreateInfo,
16193  VmaAllocation* pAllocation,
16194  VmaAllocationInfo* pAllocationInfo)
16195 {
16196  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16197 
16198  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16199 
16200  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16201 
16202  VkMemoryRequirements vkMemReq = {};
16203  bool requiresDedicatedAllocation = false;
16204  bool prefersDedicatedAllocation = false;
16205  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16206  requiresDedicatedAllocation,
16207  prefersDedicatedAllocation);
16208 
16209  VkResult result = allocator->AllocateMemory(
16210  vkMemReq,
16211  requiresDedicatedAllocation,
16212  prefersDedicatedAllocation,
16213  buffer, // dedicatedBuffer
16214  VK_NULL_HANDLE, // dedicatedImage
16215  *pCreateInfo,
16216  VMA_SUBALLOCATION_TYPE_BUFFER,
16217  1, // allocationCount
16218  pAllocation);
16219 
16220 #if VMA_RECORDING_ENABLED
16221  if(allocator->GetRecorder() != VMA_NULL)
16222  {
16223  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16224  allocator->GetCurrentFrameIndex(),
16225  vkMemReq,
16226  requiresDedicatedAllocation,
16227  prefersDedicatedAllocation,
16228  *pCreateInfo,
16229  *pAllocation);
16230  }
16231 #endif
16232 
16233  if(pAllocationInfo && result == VK_SUCCESS)
16234  {
16235  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16236  }
16237 
16238  return result;
16239 }
16240 
16241 VkResult vmaAllocateMemoryForImage(
16242  VmaAllocator allocator,
16243  VkImage image,
16244  const VmaAllocationCreateInfo* pCreateInfo,
16245  VmaAllocation* pAllocation,
16246  VmaAllocationInfo* pAllocationInfo)
16247 {
16248  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16249 
16250  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16251 
16252  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16253 
16254  VkMemoryRequirements vkMemReq = {};
16255  bool requiresDedicatedAllocation = false;
16256  bool prefersDedicatedAllocation = false;
16257  allocator->GetImageMemoryRequirements(image, vkMemReq,
16258  requiresDedicatedAllocation, prefersDedicatedAllocation);
16259 
16260  VkResult result = allocator->AllocateMemory(
16261  vkMemReq,
16262  requiresDedicatedAllocation,
16263  prefersDedicatedAllocation,
16264  VK_NULL_HANDLE, // dedicatedBuffer
16265  image, // dedicatedImage
16266  *pCreateInfo,
16267  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16268  1, // allocationCount
16269  pAllocation);
16270 
16271 #if VMA_RECORDING_ENABLED
16272  if(allocator->GetRecorder() != VMA_NULL)
16273  {
16274  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16275  allocator->GetCurrentFrameIndex(),
16276  vkMemReq,
16277  requiresDedicatedAllocation,
16278  prefersDedicatedAllocation,
16279  *pCreateInfo,
16280  *pAllocation);
16281  }
16282 #endif
16283 
16284  if(pAllocationInfo && result == VK_SUCCESS)
16285  {
16286  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16287  }
16288 
16289  return result;
16290 }
16291 
16292 void vmaFreeMemory(
16293  VmaAllocator allocator,
16294  VmaAllocation allocation)
16295 {
16296  VMA_ASSERT(allocator);
16297 
16298  if(allocation == VK_NULL_HANDLE)
16299  {
16300  return;
16301  }
16302 
16303  VMA_DEBUG_LOG("vmaFreeMemory");
16304 
16305  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16306 
16307 #if VMA_RECORDING_ENABLED
16308  if(allocator->GetRecorder() != VMA_NULL)
16309  {
16310  allocator->GetRecorder()->RecordFreeMemory(
16311  allocator->GetCurrentFrameIndex(),
16312  allocation);
16313  }
16314 #endif
16315 
16316  allocator->FreeMemory(
16317  1, // allocationCount
16318  &allocation);
16319 }
16320 
16321 void vmaFreeMemoryPages(
16322  VmaAllocator allocator,
16323  size_t allocationCount,
16324  VmaAllocation* pAllocations)
16325 {
16326  if(allocationCount == 0)
16327  {
16328  return;
16329  }
16330 
16331  VMA_ASSERT(allocator);
16332 
16333  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16334 
16335  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16336 
16337 #if VMA_RECORDING_ENABLED
16338  if(allocator->GetRecorder() != VMA_NULL)
16339  {
16340  allocator->GetRecorder()->RecordFreeMemoryPages(
16341  allocator->GetCurrentFrameIndex(),
16342  (uint64_t)allocationCount,
16343  pAllocations);
16344  }
16345 #endif
16346 
16347  allocator->FreeMemory(allocationCount, pAllocations);
16348 }
16349 
16350 VkResult vmaResizeAllocation(
16351  VmaAllocator allocator,
16352  VmaAllocation allocation,
16353  VkDeviceSize newSize)
16354 {
16355  VMA_ASSERT(allocator && allocation);
16356 
16357  VMA_DEBUG_LOG("vmaResizeAllocation");
16358 
16359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16360 
16361 #if VMA_RECORDING_ENABLED
16362  if(allocator->GetRecorder() != VMA_NULL)
16363  {
16364  allocator->GetRecorder()->RecordResizeAllocation(
16365  allocator->GetCurrentFrameIndex(),
16366  allocation,
16367  newSize);
16368  }
16369 #endif
16370 
16371  return allocator->ResizeAllocation(allocation, newSize);
16372 }
16373 
16374 void vmaGetAllocationInfo(
16375  VmaAllocator allocator,
16376  VmaAllocation allocation,
16377  VmaAllocationInfo* pAllocationInfo)
16378 {
16379  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16380 
16381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16382 
16383 #if VMA_RECORDING_ENABLED
16384  if(allocator->GetRecorder() != VMA_NULL)
16385  {
16386  allocator->GetRecorder()->RecordGetAllocationInfo(
16387  allocator->GetCurrentFrameIndex(),
16388  allocation);
16389  }
16390 #endif
16391 
16392  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16393 }
16394 
16395 VkBool32 vmaTouchAllocation(
16396  VmaAllocator allocator,
16397  VmaAllocation allocation)
16398 {
16399  VMA_ASSERT(allocator && allocation);
16400 
16401  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16402 
16403 #if VMA_RECORDING_ENABLED
16404  if(allocator->GetRecorder() != VMA_NULL)
16405  {
16406  allocator->GetRecorder()->RecordTouchAllocation(
16407  allocator->GetCurrentFrameIndex(),
16408  allocation);
16409  }
16410 #endif
16411 
16412  return allocator->TouchAllocation(allocation);
16413 }
16414 
16415 void vmaSetAllocationUserData(
16416  VmaAllocator allocator,
16417  VmaAllocation allocation,
16418  void* pUserData)
16419 {
16420  VMA_ASSERT(allocator && allocation);
16421 
16422  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16423 
16424  allocation->SetUserData(allocator, pUserData);
16425 
16426 #if VMA_RECORDING_ENABLED
16427  if(allocator->GetRecorder() != VMA_NULL)
16428  {
16429  allocator->GetRecorder()->RecordSetAllocationUserData(
16430  allocator->GetCurrentFrameIndex(),
16431  allocation,
16432  pUserData);
16433  }
16434 #endif
16435 }
16436 
16437 void vmaCreateLostAllocation(
16438  VmaAllocator allocator,
16439  VmaAllocation* pAllocation)
16440 {
16441  VMA_ASSERT(allocator && pAllocation);
16442 
16443  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16444 
16445  allocator->CreateLostAllocation(pAllocation);
16446 
16447 #if VMA_RECORDING_ENABLED
16448  if(allocator->GetRecorder() != VMA_NULL)
16449  {
16450  allocator->GetRecorder()->RecordCreateLostAllocation(
16451  allocator->GetCurrentFrameIndex(),
16452  *pAllocation);
16453  }
16454 #endif
16455 }
16456 
16457 VkResult vmaMapMemory(
16458  VmaAllocator allocator,
16459  VmaAllocation allocation,
16460  void** ppData)
16461 {
16462  VMA_ASSERT(allocator && allocation && ppData);
16463 
16464  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16465 
16466  VkResult res = allocator->Map(allocation, ppData);
16467 
16468 #if VMA_RECORDING_ENABLED
16469  if(allocator->GetRecorder() != VMA_NULL)
16470  {
16471  allocator->GetRecorder()->RecordMapMemory(
16472  allocator->GetCurrentFrameIndex(),
16473  allocation);
16474  }
16475 #endif
16476 
16477  return res;
16478 }
16479 
16480 void vmaUnmapMemory(
16481  VmaAllocator allocator,
16482  VmaAllocation allocation)
16483 {
16484  VMA_ASSERT(allocator && allocation);
16485 
16486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16487 
16488 #if VMA_RECORDING_ENABLED
16489  if(allocator->GetRecorder() != VMA_NULL)
16490  {
16491  allocator->GetRecorder()->RecordUnmapMemory(
16492  allocator->GetCurrentFrameIndex(),
16493  allocation);
16494  }
16495 #endif
16496 
16497  allocator->Unmap(allocation);
16498 }
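/*
Example usage (illustrative sketch; `srcData`/`srcDataSize` are application
data). Mapping is reference-counted internally, so nested Map/Unmap pairs on
the same allocation are legal:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/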
16499 
16500 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16501 {
16502  VMA_ASSERT(allocator && allocation);
16503 
16504  VMA_DEBUG_LOG("vmaFlushAllocation");
16505 
16506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16507 
16508  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16509 
16510 #if VMA_RECORDING_ENABLED
16511  if(allocator->GetRecorder() != VMA_NULL)
16512  {
16513  allocator->GetRecorder()->RecordFlushAllocation(
16514  allocator->GetCurrentFrameIndex(),
16515  allocation, offset, size);
16516  }
16517 #endif
16518 }
16519 
16520 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16521 {
16522  VMA_ASSERT(allocator && allocation);
16523 
16524  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16525 
16526  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16527 
16528  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16529 
16530 #if VMA_RECORDING_ENABLED
16531  if(allocator->GetRecorder() != VMA_NULL)
16532  {
16533  allocator->GetRecorder()->RecordInvalidateAllocation(
16534  allocator->GetCurrentFrameIndex(),
16535  allocation, offset, size);
16536  }
16537 #endif
16538 }
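/*
Example usage (illustrative sketch): after writing through a persistent
mapping to memory that is HOST_VISIBLE but not HOST_COHERENT, flush the
written range. Offsets and sizes are expanded to nonCoherentAtomSize
internally (see FlushOrInvalidateAllocation above), so unaligned values are
fine:

    memcpy((char*)allocInfo.pMappedData + dstOffset, srcData, srcDataSize);
    vmaFlushAllocation(allocator, allocation, dstOffset, srcDataSize);
*/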
16539 
16540 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16541 {
16542  VMA_ASSERT(allocator);
16543 
16544  VMA_DEBUG_LOG("vmaCheckCorruption");
16545 
16546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16547 
16548  return allocator->CheckCorruption(memoryTypeBits);
16549 }
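/*
Note: corruption checks can only validate margins in host-visible memory and
only when the library is compiled with VMA_DEBUG_MARGIN and
VMA_DEBUG_DETECT_CORRUPTION enabled, hence VK_ERROR_FEATURE_NOT_PRESENT as
the result when no eligible block vector exists.
*/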
16550 
16551 VkResult vmaDefragment(
16552  VmaAllocator allocator,
16553  VmaAllocation* pAllocations,
16554  size_t allocationCount,
16555  VkBool32* pAllocationsChanged,
16556  const VmaDefragmentationInfo *pDefragmentationInfo,
16557  VmaDefragmentationStats* pDefragmentationStats)
16558 {
16559  // Deprecated interface, reimplemented using the new one.
16560 
16561  VmaDefragmentationInfo2 info2 = {};
16562  info2.allocationCount = (uint32_t)allocationCount;
16563  info2.pAllocations = pAllocations;
16564  info2.pAllocationsChanged = pAllocationsChanged;
16565  if(pDefragmentationInfo != VMA_NULL)
16566  {
16567  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16568  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16569  }
16570  else
16571  {
16572  info2.maxCpuAllocationsToMove = UINT32_MAX;
16573  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16574  }
16575  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, and commandBuffer are deliberately left zero.
16576 
16577  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16578  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16578  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16579  if(res == VK_NOT_READY)
16580  {
16581  res = vmaDefragmentationEnd(allocator, ctx);
16582  }
16583  return res;
16584 }
16585 
16586 VkResult vmaDefragmentationBegin(
16587  VmaAllocator allocator,
16588  const VmaDefragmentationInfo2* pInfo,
16589  VmaDefragmentationStats* pStats,
16590  VmaDefragmentationContext *pContext)
16591 {
16592  VMA_ASSERT(allocator && pInfo && pContext);
16593 
16594  // Degenerate case: Nothing to defragment.
16595  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16596  {
16597  return VK_SUCCESS;
16598  }
16599 
16600  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16601  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16602  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16603  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16604 
16605  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16606 
16607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16608 
16609  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16610 
16611 #if VMA_RECORDING_ENABLED
16612  if(allocator->GetRecorder() != VMA_NULL)
16613  {
16614  allocator->GetRecorder()->RecordDefragmentationBegin(
16615  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16616  }
16617 #endif
16618 
16619  return res;
16620 }
16621 
16622 VkResult vmaDefragmentationEnd(
16623  VmaAllocator allocator,
16624  VmaDefragmentationContext context)
16625 {
16626  VMA_ASSERT(allocator);
16627 
16628  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16629 
16630  if(context != VK_NULL_HANDLE)
16631  {
16632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16633 
16634 #if VMA_RECORDING_ENABLED
16635  if(allocator->GetRecorder() != VMA_NULL)
16636  {
16637  allocator->GetRecorder()->RecordDefragmentationEnd(
16638  allocator->GetCurrentFrameIndex(), context);
16639  }
16640 #endif
16641 
16642  return allocator->DefragmentationEnd(context);
16643  }
16644  else
16645  {
16646  return VK_SUCCESS;
16647  }
16648 }
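/*
Example usage (illustrative sketch) of the new interface for CPU-side
defragmentation; GPU-side moves would additionally set commandBuffer and the
maxGpu* limits:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocs; // array of VmaAllocation
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx); // no-op once CPU work completed synchronously
*/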
16649 
16650 VkResult vmaBindBufferMemory(
16651  VmaAllocator allocator,
16652  VmaAllocation allocation,
16653  VkBuffer buffer)
16654 {
16655  VMA_ASSERT(allocator && allocation && buffer);
16656 
16657  VMA_DEBUG_LOG("vmaBindBufferMemory");
16658 
16659  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16660 
16661  return allocator->BindBufferMemory(allocation, buffer);
16662 }
16663 
16664 VkResult vmaBindImageMemory(
16665  VmaAllocator allocator,
16666  VmaAllocation allocation,
16667  VkImage image)
16668 {
16669  VMA_ASSERT(allocator && allocation && image);
16670 
16671  VMA_DEBUG_LOG("vmaBindImageMemory");
16672 
16673  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16674 
16675  return allocator->BindImageMemory(allocation, image);
16676 }
16677 
16678 VkResult vmaCreateBuffer(
16679  VmaAllocator allocator,
16680  const VkBufferCreateInfo* pBufferCreateInfo,
16681  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16682  VkBuffer* pBuffer,
16683  VmaAllocation* pAllocation,
16684  VmaAllocationInfo* pAllocationInfo)
16685 {
16686  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16687 
16688  if(pBufferCreateInfo->size == 0)
16689  {
16690  return VK_ERROR_VALIDATION_FAILED_EXT;
16691  }
16692 
16693  VMA_DEBUG_LOG("vmaCreateBuffer");
16694 
16695  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16696 
16697  *pBuffer = VK_NULL_HANDLE;
16698  *pAllocation = VK_NULL_HANDLE;
16699 
16700  // 1. Create VkBuffer.
16701  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16702  allocator->m_hDevice,
16703  pBufferCreateInfo,
16704  allocator->GetAllocationCallbacks(),
16705  pBuffer);
16706  if(res >= 0)
16707  {
16708  // 2. vkGetBufferMemoryRequirements.
16709  VkMemoryRequirements vkMemReq = {};
16710  bool requiresDedicatedAllocation = false;
16711  bool prefersDedicatedAllocation = false;
16712  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16713  requiresDedicatedAllocation, prefersDedicatedAllocation);
16714 
16715  // Make sure the alignment requirements for specific buffer usages reported
16716  // in Physical Device Properties are included in the alignment reported by memory requirements.
16717  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16718  {
16719  VMA_ASSERT(vkMemReq.alignment %
16720  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16721  }
16722  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16723  {
16724  VMA_ASSERT(vkMemReq.alignment %
16725  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16726  }
16727  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16728  {
16729  VMA_ASSERT(vkMemReq.alignment %
16730  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16731  }
16732 
16733  // 3. Allocate memory using allocator.
16734  res = allocator->AllocateMemory(
16735  vkMemReq,
16736  requiresDedicatedAllocation,
16737  prefersDedicatedAllocation,
16738  *pBuffer, // dedicatedBuffer
16739  VK_NULL_HANDLE, // dedicatedImage
16740  *pAllocationCreateInfo,
16741  VMA_SUBALLOCATION_TYPE_BUFFER,
16742  1, // allocationCount
16743  pAllocation);
16744 
16745 #if VMA_RECORDING_ENABLED
16746  if(allocator->GetRecorder() != VMA_NULL)
16747  {
16748  allocator->GetRecorder()->RecordCreateBuffer(
16749  allocator->GetCurrentFrameIndex(),
16750  *pBufferCreateInfo,
16751  *pAllocationCreateInfo,
16752  *pAllocation);
16753  }
16754 #endif
16755 
16756  if(res >= 0)
16757  {
16758  // 4. Bind buffer with memory.
16759  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16760  {
16761  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16762  }
16763  if(res >= 0)
16764  {
16765  // All steps succeeded.
16766  #if VMA_STATS_STRING_ENABLED
16767  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16768  #endif
16769  if(pAllocationInfo != VMA_NULL)
16770  {
16771  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16772  }
16773 
16774  return VK_SUCCESS;
16775  }
16776  allocator->FreeMemory(
16777  1, // allocationCount
16778  pAllocation);
16779  *pAllocation = VK_NULL_HANDLE;
16780  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16781  *pBuffer = VK_NULL_HANDLE;
16782  return res;
16783  }
16784  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16785  *pBuffer = VK_NULL_HANDLE;
16786  return res;
16787  }
16788  return res;
16789 }
16790 
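/*
Editorial usage sketch (not part of the original source): a typical
vmaCreateBuffer() call, assuming an initialized VmaAllocator named
`allocator`. The size and usage flags are illustrative only.

\code
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
    VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr);
// As the implementation above shows, on failure both *pBuffer and
// *pAllocation are returned as VK_NULL_HANDLE, so there is nothing
// to clean up.
\endcode
*/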
16791 void vmaDestroyBuffer(
16792  VmaAllocator allocator,
16793  VkBuffer buffer,
16794  VmaAllocation allocation)
16795 {
16796  VMA_ASSERT(allocator);
16797 
16798  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16799  {
16800  return;
16801  }
16802 
16803  VMA_DEBUG_LOG("vmaDestroyBuffer");
16804 
16805  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16806 
16807 #if VMA_RECORDING_ENABLED
16808  if(allocator->GetRecorder() != VMA_NULL)
16809  {
16810  allocator->GetRecorder()->RecordDestroyBuffer(
16811  allocator->GetCurrentFrameIndex(),
16812  allocation);
16813  }
16814 #endif
16815 
16816  if(buffer != VK_NULL_HANDLE)
16817  {
16818  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16819  }
16820 
16821  if(allocation != VK_NULL_HANDLE)
16822  {
16823  allocator->FreeMemory(
16824  1, // allocationCount
16825  &allocation);
16826  }
16827 }
16828 
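/*
Editorial note with a short sketch (not part of the original source):
vmaDestroyBuffer() is the counterpart of vmaCreateBuffer(). As the early
return above shows, passing VK_NULL_HANDLE for both the buffer and the
allocation is a valid no-op, and either argument may be null on its own.

\code
// Completing the lifecycle of a buffer created with vmaCreateBuffer():
vmaDestroyBuffer(allocator, buffer, allocation);
buffer = VK_NULL_HANDLE;
allocation = VK_NULL_HANDLE;
\endcode
*/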
16829 VkResult vmaCreateImage(
16830  VmaAllocator allocator,
16831  const VkImageCreateInfo* pImageCreateInfo,
16832  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16833  VkImage* pImage,
16834  VmaAllocation* pAllocation,
16835  VmaAllocationInfo* pAllocationInfo)
16836 {
16837  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16838 
16839  if(pImageCreateInfo->extent.width == 0 ||
16840  pImageCreateInfo->extent.height == 0 ||
16841  pImageCreateInfo->extent.depth == 0 ||
16842  pImageCreateInfo->mipLevels == 0 ||
16843  pImageCreateInfo->arrayLayers == 0)
16844  {
16845  return VK_ERROR_VALIDATION_FAILED_EXT;
16846  }
16847 
16848  VMA_DEBUG_LOG("vmaCreateImage");
16849 
16850  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16851 
16852  *pImage = VK_NULL_HANDLE;
16853  *pAllocation = VK_NULL_HANDLE;
16854 
16855  // 1. Create VkImage.
16856  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16857  allocator->m_hDevice,
16858  pImageCreateInfo,
16859  allocator->GetAllocationCallbacks(),
16860  pImage);
16861  if(res >= 0)
16862  {
16863  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16864  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16865  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16866 
16867  // 2. Allocate memory using allocator.
16868  VkMemoryRequirements vkMemReq = {};
16869  bool requiresDedicatedAllocation = false;
16870  bool prefersDedicatedAllocation = false;
16871  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16872  requiresDedicatedAllocation, prefersDedicatedAllocation);
16873 
16874  res = allocator->AllocateMemory(
16875  vkMemReq,
16876  requiresDedicatedAllocation,
16877  prefersDedicatedAllocation,
16878  VK_NULL_HANDLE, // dedicatedBuffer
16879  *pImage, // dedicatedImage
16880  *pAllocationCreateInfo,
16881  suballocType,
16882  1, // allocationCount
16883  pAllocation);
16884 
16885 #if VMA_RECORDING_ENABLED
16886  if(allocator->GetRecorder() != VMA_NULL)
16887  {
16888  allocator->GetRecorder()->RecordCreateImage(
16889  allocator->GetCurrentFrameIndex(),
16890  *pImageCreateInfo,
16891  *pAllocationCreateInfo,
16892  *pAllocation);
16893  }
16894 #endif
16895 
16896  if(res >= 0)
16897  {
16898  // 3. Bind image with memory.
16899  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16900  {
16901  res = allocator->BindImageMemory(*pAllocation, *pImage);
16902  }
16903  if(res >= 0)
16904  {
16905  // All steps succeeded.
16906  #if VMA_STATS_STRING_ENABLED
16907  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16908  #endif
16909  if(pAllocationInfo != VMA_NULL)
16910  {
16911  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16912  }
16913 
16914  return VK_SUCCESS;
16915  }
16916  allocator->FreeMemory(
16917  1, // allocationCount
16918  pAllocation);
16919  *pAllocation = VK_NULL_HANDLE;
16920  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16921  *pImage = VK_NULL_HANDLE;
16922  return res;
16923  }
16924  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16925  *pImage = VK_NULL_HANDLE;
16926  return res;
16927  }
16928  return res;
16929 }
16930 
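/*
Editorial usage sketch (not part of the original source): creating a 2D
sampled texture with vmaCreateImage(), assuming an initialized VmaAllocator
named `allocator`. Dimensions and format are illustrative only; note that
the validation above rejects zero extent, mipLevels, or arrayLayers.

\code
VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.extent.width = 1024;
imageInfo.extent.height = 1024;
imageInfo.extent.depth = 1;
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
// Optimal tiling selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL internally.
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imageInfo, &allocCreateInfo,
    &image, &allocation, nullptr);
\endcode
*/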
16931 void vmaDestroyImage(
16932  VmaAllocator allocator,
16933  VkImage image,
16934  VmaAllocation allocation)
16935 {
16936  VMA_ASSERT(allocator);
16937 
16938  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16939  {
16940  return;
16941  }
16942 
16943  VMA_DEBUG_LOG("vmaDestroyImage");
16944 
16945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16946 
16947 #if VMA_RECORDING_ENABLED
16948  if(allocator->GetRecorder() != VMA_NULL)
16949  {
16950  allocator->GetRecorder()->RecordDestroyImage(
16951  allocator->GetCurrentFrameIndex(),
16952  allocation);
16953  }
16954 #endif
16955 
16956  if(image != VK_NULL_HANDLE)
16957  {
16958  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16959  }
16960  if(allocation != VK_NULL_HANDLE)
16961  {
16962  allocator->FreeMemory(
16963  1, // allocationCount
16964  &allocation);
16965  }
16966 }
16967 
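/*
Editorial note (not part of the original source): a minimal teardown sketch.
All buffers, images, and their allocations created from an allocator should
be destroyed before the allocator itself. The handle names are assumptions
for illustration.

\code
vmaDestroyImage(allocator, image, imageAllocation);
vmaDestroyBuffer(allocator, buffer, bufferAllocation);
vmaDestroyAllocator(allocator);
\endcode
*/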
16968 #endif // #ifdef VMA_IMPLEMENTATION