Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1644 /*
1645 Define this macro to 0/1 to disable/enable support for recording functionality,
1646 available through VmaAllocatorCreateInfo::pRecordSettings.
1647 */
1648 #ifndef VMA_RECORDING_ENABLED
1649  #ifdef _WIN32
1650  #define VMA_RECORDING_ENABLED 1
1651  #else
1652  #define VMA_RECORDING_ENABLED 0
1653  #endif
1654 #endif
1655 
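// For example, to force recording support off on all platforms, define the
// macro yourself before including this header (a sketch of the intended usage):
//
//     #define VMA_RECORDING_ENABLED 0
//     #include "vk_mem_alloc.h"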
1656 #ifndef NOMINMAX
1657  #define NOMINMAX // For windows.h
1658 #endif
1659 
1660 #ifndef VULKAN_H_
1661  #include <vulkan/vulkan.h>
1662 #endif
1663 
1664 #if VMA_RECORDING_ENABLED
1665  #include <windows.h>
1666 #endif
1667 
1668 #if !defined(VMA_DEDICATED_ALLOCATION)
1669  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1670  #define VMA_DEDICATED_ALLOCATION 1
1671  #else
1672  #define VMA_DEDICATED_ALLOCATION 0
1673  #endif
1674 #endif
1675 
1685 VK_DEFINE_HANDLE(VmaAllocator)
1686 
1687 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1689  VmaAllocator allocator,
1690  uint32_t memoryType,
1691  VkDeviceMemory memory,
1692  VkDeviceSize size);
1694 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1695  VmaAllocator allocator,
1696  uint32_t memoryType,
1697  VkDeviceMemory memory,
1698  VkDeviceSize size);
1699 
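// Example (a sketch, not part of the original header): a pair of callbacks that
// log every device memory allocation and free made by the library. They would be
// registered through VmaDeviceMemoryCallbacks (declared above, elided in this
// listing) via VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
//
//     static void VKAPI_PTR MyAllocateCallback(
//         VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
//     {
//         printf("VMA allocated %llu bytes from memory type %u\n",
//             (unsigned long long)size, memoryType);
//     }
//     static void VKAPI_PTR MyFreeCallback(
//         VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
//     {
//         printf("VMA freed %llu bytes from memory type %u\n",
//             (unsigned long long)size, memoryType);
//     }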
1713 
1743 
1746 typedef VkFlags VmaAllocatorCreateFlags;
1747 
1752 typedef struct VmaVulkanFunctions {
1753  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1754  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1755  PFN_vkAllocateMemory vkAllocateMemory;
1756  PFN_vkFreeMemory vkFreeMemory;
1757  PFN_vkMapMemory vkMapMemory;
1758  PFN_vkUnmapMemory vkUnmapMemory;
1759  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1760  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1761  PFN_vkBindBufferMemory vkBindBufferMemory;
1762  PFN_vkBindImageMemory vkBindImageMemory;
1763  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1764  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1765  PFN_vkCreateBuffer vkCreateBuffer;
1766  PFN_vkDestroyBuffer vkDestroyBuffer;
1767  PFN_vkCreateImage vkCreateImage;
1768  PFN_vkDestroyImage vkDestroyImage;
1769  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1770 #if VMA_DEDICATED_ALLOCATION
1771  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1772  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1773 #endif
1774 } VmaVulkanFunctions;
1775 
1777 typedef enum VmaRecordFlagBits {
1781  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
1782  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1783 } VmaRecordFlagBits;
1784 
1787 typedef VkFlags VmaRecordFlags;
1788 
1790 typedef struct VmaRecordSettings
1791 {
1793  VmaRecordFlags flags;
1801  const char* pFilePath;
1802 } VmaRecordSettings;
1803 
1805 typedef struct VmaAllocatorCreateInfo
1806 {
1808  VmaAllocatorCreateFlags flags;
1810 
1811  VkPhysicalDevice physicalDevice;
1813 
1814  VkDevice device;
1816 
1818  VkDeviceSize preferredLargeHeapBlockSize;
1819 
1820  const VkAllocationCallbacks* pAllocationCallbacks;
1822 
1824  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
1836  uint32_t frameInUseCount;
1862  const VkDeviceSize* pHeapSizeLimit;
1871  const VmaVulkanFunctions* pVulkanFunctions;
1877  const VmaRecordSettings* pRecordSettings;
1882 } VmaAllocatorCreateInfo;
1883 
1885 VkResult vmaCreateAllocator(
1886  const VmaAllocatorCreateInfo* pCreateInfo,
1887  VmaAllocator* pAllocator);
1888 
1890 void vmaDestroyAllocator(
1891  VmaAllocator allocator);
1892 
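// Example usage (a sketch; `physicalDevice` and `device` are assumed to be
// handles already created by the application):
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//
//     VmaAllocator allocator;
//     VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//     // ... create buffers and images through `allocator` ...
//     vmaDestroyAllocator(allocator);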
1897 void vmaGetPhysicalDeviceProperties(
1898  VmaAllocator allocator,
1899  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1900 
1905 void vmaGetMemoryProperties(
1906  VmaAllocator allocator,
1907  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1908 
1915 void vmaGetMemoryTypeProperties(
1916  VmaAllocator allocator,
1917  uint32_t memoryTypeIndex,
1918  VkMemoryPropertyFlags* pFlags);
1919 
1928 void vmaSetCurrentFrameIndex(
1929  VmaAllocator allocator,
1930  uint32_t frameIndex);
1931 
1934 typedef struct VmaStatInfo
1935 {
1937  uint32_t blockCount;
1939  uint32_t allocationCount;
1941  uint32_t unusedRangeCount;
1943  VkDeviceSize usedBytes;
1945  VkDeviceSize unusedBytes;
1946  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1947  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1948 } VmaStatInfo;
1949 
1951 typedef struct VmaStats
1952 {
1953  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1954  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1955  VmaStatInfo total;
1956 } VmaStats;
1957 
1959 void vmaCalculateStats(
1960  VmaAllocator allocator,
1961  VmaStats* pStats);
1962 
1963 #define VMA_STATS_STRING_ENABLED 1
1964 
1965 #if VMA_STATS_STRING_ENABLED
1966 
1968 
1970 void vmaBuildStatsString(
1971  VmaAllocator allocator,
1972  char** ppStatsString,
1973  VkBool32 detailedMap);
1974 
1975 void vmaFreeStatsString(
1976  VmaAllocator allocator,
1977  char* pStatsString);
1978 
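// Example usage (a sketch; assumes `allocator` is a valid VmaAllocator):
//
//     char* statsString = nullptr;
//     vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailed map included
//     // statsString now holds a JSON report - write it to a file or log.
//     vmaFreeStatsString(allocator, statsString);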
1979 #endif // #if VMA_STATS_STRING_ENABLED
1980 
1989 VK_DEFINE_HANDLE(VmaPool)
1990 
1991 typedef enum VmaMemoryUsage
1992 {
1998  VMA_MEMORY_USAGE_UNKNOWN = 0,
2009  VMA_MEMORY_USAGE_GPU_ONLY = 1,
2018  VMA_MEMORY_USAGE_CPU_ONLY = 2,
2026  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
2036  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
2040  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
2041 } VmaMemoryUsage;
2042 
2043 typedef enum VmaAllocationCreateFlagBits {
2052  VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
2058  VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
2066  VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
2087  VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
2092  VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
2099  VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
2107  VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
2112  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
2117  VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
2121  VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
2123  VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
2126  VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2129  VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
2133  VMA_ALLOCATION_CREATE_STRATEGY_MASK =
2134  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
2135  VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
2136  VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2140  VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2141 } VmaAllocationCreateFlagBits;
2143 typedef VkFlags VmaAllocationCreateFlags;
2144 
2145 typedef struct VmaAllocationCreateInfo
2146 {
2149  VmaAllocationCreateFlags flags;
2154  VmaMemoryUsage usage;
2159  VkMemoryPropertyFlags requiredFlags;
2164  VkMemoryPropertyFlags preferredFlags;
2172  uint32_t memoryTypeBits;
2178  VmaPool pool;
2185  void* pUserData;
2186 } VmaAllocationCreateInfo;
2187 
2204 VkResult vmaFindMemoryTypeIndex(
2205  VmaAllocator allocator,
2206  uint32_t memoryTypeBits,
2207  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2208  uint32_t* pMemoryTypeIndex);
2209 
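// Example usage (a sketch; `memoryTypeBits` would typically come from the
// VkMemoryRequirements of a buffer or image):
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     uint32_t memTypeIndex;
//     VkResult res = vmaFindMemoryTypeIndex(
//         allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);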
2222 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2223  VmaAllocator allocator,
2224  const VkBufferCreateInfo* pBufferCreateInfo,
2225  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2226  uint32_t* pMemoryTypeIndex);
2227 
2240 VkResult vmaFindMemoryTypeIndexForImageInfo(
2241  VmaAllocator allocator,
2242  const VkImageCreateInfo* pImageCreateInfo,
2243  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2244  uint32_t* pMemoryTypeIndex);
2245 
2265 typedef enum VmaPoolCreateFlagBits {
2283  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2294  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
2299  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
2300  VMA_POOL_CREATE_ALGORITHM_MASK = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
2301  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2302 } VmaPoolCreateFlagBits;
2303 typedef VkFlags VmaPoolCreateFlags;
2304 
2307 typedef struct VmaPoolCreateInfo {
2310  uint32_t memoryTypeIndex;
2313  VmaPoolCreateFlags flags;
2322  VkDeviceSize blockSize;
2328  size_t minBlockCount;
2335  size_t maxBlockCount;
2349  uint32_t frameInUseCount;
2350 } VmaPoolCreateInfo;
2351 
2354 typedef struct VmaPoolStats {
2357  VkDeviceSize size;
2360  VkDeviceSize unusedSize;
2363  size_t allocationCount;
2366  size_t unusedRangeCount;
2373  VkDeviceSize unusedRangeSizeMax;
2376  size_t blockCount;
2377 } VmaPoolStats;
2378 
2385 VkResult vmaCreatePool(
2386  VmaAllocator allocator,
2387  const VmaPoolCreateInfo* pCreateInfo,
2388  VmaPool* pPool);
2389 
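// Example usage (a sketch; `memTypeIndex` is assumed to have been found with
// one of the vmaFindMemoryTypeIndex* functions above):
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.blockSize = 64ull * 1024 * 1024; // fixed 64 MiB blocks
//     poolCreateInfo.maxBlockCount = 2;               // at most 2 such blocks
//
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//     // ... allocate with VmaAllocationCreateInfo::pool = pool ...
//     vmaDestroyPool(allocator, pool);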
2392 void vmaDestroyPool(
2393  VmaAllocator allocator,
2394  VmaPool pool);
2395 
2402 void vmaGetPoolStats(
2403  VmaAllocator allocator,
2404  VmaPool pool,
2405  VmaPoolStats* pPoolStats);
2406 
2413 void vmaMakePoolAllocationsLost(
2414  VmaAllocator allocator,
2415  VmaPool pool,
2416  size_t* pLostAllocationCount);
2417 
2432 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2433 
2458 VK_DEFINE_HANDLE(VmaAllocation)
2459 
2460 
2462 typedef struct VmaAllocationInfo {
2467  uint32_t memoryType;
2476  VkDeviceMemory deviceMemory;
2481  VkDeviceSize offset;
2486  VkDeviceSize size;
2493  void* pMappedData;
2500  void* pUserData;
2501 } VmaAllocationInfo;
2502 
2513 VkResult vmaAllocateMemory(
2514  VmaAllocator allocator,
2515  const VkMemoryRequirements* pVkMemoryRequirements,
2516  const VmaAllocationCreateInfo* pCreateInfo,
2517  VmaAllocation* pAllocation,
2518  VmaAllocationInfo* pAllocationInfo);
2519 
2539 VkResult vmaAllocateMemoryPages(
2540  VmaAllocator allocator,
2541  const VkMemoryRequirements* pVkMemoryRequirements,
2542  const VmaAllocationCreateInfo* pCreateInfo,
2543  size_t allocationCount,
2544  VmaAllocation* pAllocations,
2545  VmaAllocationInfo* pAllocationInfo);
2546 
2553 VkResult vmaAllocateMemoryForBuffer(
2554  VmaAllocator allocator,
2555  VkBuffer buffer,
2556  const VmaAllocationCreateInfo* pCreateInfo,
2557  VmaAllocation* pAllocation,
2558  VmaAllocationInfo* pAllocationInfo);
2559 
2561 VkResult vmaAllocateMemoryForImage(
2562  VmaAllocator allocator,
2563  VkImage image,
2564  const VmaAllocationCreateInfo* pCreateInfo,
2565  VmaAllocation* pAllocation,
2566  VmaAllocationInfo* pAllocationInfo);
2567 
2572 void vmaFreeMemory(
2573  VmaAllocator allocator,
2574  VmaAllocation allocation);
2575 
2586 void vmaFreeMemoryPages(
2587  VmaAllocator allocator,
2588  size_t allocationCount,
2589  VmaAllocation* pAllocations);
2590 
2611 VkResult vmaResizeAllocation(
2612  VmaAllocator allocator,
2613  VmaAllocation allocation,
2614  VkDeviceSize newSize);
2615 
2632 void vmaGetAllocationInfo(
2633  VmaAllocator allocator,
2634  VmaAllocation allocation,
2635  VmaAllocationInfo* pAllocationInfo);
2636 
2651 VkBool32 vmaTouchAllocation(
2652  VmaAllocator allocator,
2653  VmaAllocation allocation);
2654 
2668 void vmaSetAllocationUserData(
2669  VmaAllocator allocator,
2670  VmaAllocation allocation,
2671  void* pUserData);
2672 
2683 void vmaCreateLostAllocation(
2684  VmaAllocator allocator,
2685  VmaAllocation* pAllocation);
2686 
2721 VkResult vmaMapMemory(
2722  VmaAllocator allocator,
2723  VmaAllocation allocation,
2724  void** ppData);
2725 
2730 void vmaUnmapMemory(
2731  VmaAllocator allocator,
2732  VmaAllocation allocation);
2733 
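// Example usage (a sketch; assumes `allocation` was created in a HOST_VISIBLE
// memory type, e.g. with VMA_MEMORY_USAGE_CPU_ONLY or VMA_MEMORY_USAGE_CPU_TO_GPU):
//
//     struct ConstantBuffer { float data[16]; };
//     ConstantBuffer constantBufferData = {};
//
//     void* mappedData;
//     if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
//     {
//         memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
//         vmaUnmapMemory(allocator, allocation);
//     }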
2746 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2747 
2760 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2761 
2778 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2779 
2786 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2787 
2788 typedef enum VmaDefragmentationFlagBits {
2790  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2791 } VmaDefragmentationFlagBits;
2792 typedef VkFlags VmaDefragmentationFlags;
2793 
2798 typedef struct VmaDefragmentationInfo2 {
2802  VmaDefragmentationFlags flags;
2805  uint32_t allocationCount;
2813  VmaAllocation* pAllocations;
2820  VkBool32* pAllocationsChanged;
2822  uint32_t poolCount;
2833  VmaPool* pPools;
2843  VkDeviceSize maxCpuBytesToMove;
2848  uint32_t maxCpuAllocationsToMove;
2853  VkDeviceSize maxGpuBytesToMove;
2858  uint32_t maxGpuAllocationsToMove;
2867  VkCommandBuffer commandBuffer;
2868 } VmaDefragmentationInfo2;
2869 
2874 typedef struct VmaDefragmentationInfo {
2879  VkDeviceSize maxBytesToMove;
2884  uint32_t maxAllocationsToMove;
2885 } VmaDefragmentationInfo;
2886 
2888 typedef struct VmaDefragmentationStats {
2890  VkDeviceSize bytesMoved;
2892  VkDeviceSize bytesFreed;
2894  uint32_t allocationsMoved;
2896  uint32_t deviceMemoryBlocksFreed;
2897 } VmaDefragmentationStats;
2898 
2925 VkResult vmaDefragmentationBegin(
2926  VmaAllocator allocator,
2927  const VmaDefragmentationInfo2* pInfo,
2928  VmaDefragmentationStats* pStats,
2929  VmaDefragmentationContext *pContext);
2930 
2936 VkResult vmaDefragmentationEnd(
2937  VmaAllocator allocator,
2938  VmaDefragmentationContext context);
2939 
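// Example usage (a sketch of CPU-side defragmentation; `allocations` is assumed
// to be the application's array of VmaAllocation handles and `allocCount` its size):
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocations;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//     // commandBuffer left null - only CPU-side (host visible) moves are done.
//
//     VmaDefragmentationContext defragCtx;
//     vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
//     vmaDefragmentationEnd(allocator, defragCtx);
//     // Buffers/images bound to allocations that moved must be recreated and rebound.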
2980 VkResult vmaDefragment(
2981  VmaAllocator allocator,
2982  VmaAllocation* pAllocations,
2983  size_t allocationCount,
2984  VkBool32* pAllocationsChanged,
2985  const VmaDefragmentationInfo *pDefragmentationInfo,
2986  VmaDefragmentationStats* pDefragmentationStats);
2987 
3000 VkResult vmaBindBufferMemory(
3001  VmaAllocator allocator,
3002  VmaAllocation allocation,
3003  VkBuffer buffer);
3004 
3017 VkResult vmaBindImageMemory(
3018  VmaAllocator allocator,
3019  VmaAllocation allocation,
3020  VkImage image);
3021 
3048 VkResult vmaCreateBuffer(
3049  VmaAllocator allocator,
3050  const VkBufferCreateInfo* pBufferCreateInfo,
3051  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3052  VkBuffer* pBuffer,
3053  VmaAllocation* pAllocation,
3054  VmaAllocationInfo* pAllocationInfo);
3055 
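// Example usage (a sketch; creates a 64 KiB vertex buffer in device-local memory):
//
//     VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufferInfo.size = 65536;
//     bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocInfo = {};
//     allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buffer;
//     VmaAllocation allocation;
//     vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
//     // ...
//     vmaDestroyBuffer(allocator, buffer, allocation);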
3067 void vmaDestroyBuffer(
3068  VmaAllocator allocator,
3069  VkBuffer buffer,
3070  VmaAllocation allocation);
3071 
3073 VkResult vmaCreateImage(
3074  VmaAllocator allocator,
3075  const VkImageCreateInfo* pImageCreateInfo,
3076  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3077  VkImage* pImage,
3078  VmaAllocation* pAllocation,
3079  VmaAllocationInfo* pAllocationInfo);
3080 
3092 void vmaDestroyImage(
3093  VmaAllocator allocator,
3094  VkImage image,
3095  VmaAllocation allocation);
3096 
3097 #ifdef __cplusplus
3098 }
3099 #endif
3100 
3101 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3102 
3103 // For Visual Studio IntelliSense.
3104 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3105 #define VMA_IMPLEMENTATION
3106 #endif
3107 
3108 #ifdef VMA_IMPLEMENTATION
3109 #undef VMA_IMPLEMENTATION
3110 
3111 #include <cstdint>
3112 #include <cstdlib>
3113 #include <cstring>
3114 
3115 /*******************************************************************************
3116 CONFIGURATION SECTION
3117 
3118 Define some of these macros before each #include of this header, or change them
3119 here if you need behavior other than the default, depending on your environment.
3120 */
3121 
3122 /*
3123 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3124 internally, like:
3125 
3126  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3127 
3128 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3129 VmaAllocatorCreateInfo::pVulkanFunctions.
3130 */
3131 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3132 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3133 #endif
3134 
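// When VMA_STATIC_VULKAN_FUNCTIONS is 0 (e.g. with a dynamic loader such as
// volk), the pointers must be supplied manually - a sketch:
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//     vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//     // ... fill every remaining member the same way ...
//     allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;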
3135 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3136 //#define VMA_USE_STL_CONTAINERS 1
3137 
3138 /* Set this macro to 1 to make the library include and use STL containers:
3139 std::pair, std::vector, std::list, std::unordered_map.
3140 
3141 Set it to 0 or leave it undefined to make the library use its own implementation
3142 of these containers.
3143 */
3144 #if VMA_USE_STL_CONTAINERS
3145  #define VMA_USE_STL_VECTOR 1
3146  #define VMA_USE_STL_UNORDERED_MAP 1
3147  #define VMA_USE_STL_LIST 1
3148 #endif
3149 
3150 #ifndef VMA_USE_STL_SHARED_MUTEX
3151  // Compiler conforms to C++17.
3152  #if __cplusplus >= 201703L
3153  #define VMA_USE_STL_SHARED_MUTEX 1
3154  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3155  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
3156  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3157  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3158  #define VMA_USE_STL_SHARED_MUTEX 1
3159  #else
3160  #define VMA_USE_STL_SHARED_MUTEX 0
3161  #endif
3162 #endif
3163 
3164 #if VMA_USE_STL_VECTOR
3165  #include <vector>
3166 #endif
3167 
3168 #if VMA_USE_STL_UNORDERED_MAP
3169  #include <unordered_map>
3170 #endif
3171 
3172 #if VMA_USE_STL_LIST
3173  #include <list>
3174 #endif
3175 
3176 /*
3177 Following headers are used in this CONFIGURATION section only, so feel free to
3178 remove them if not needed.
3179 */
3180 #include <cassert> // for assert
3181 #include <algorithm> // for min, max
3182 #include <mutex>
3183 #include <atomic> // for std::atomic
3184 
3185 #ifndef VMA_NULL
3186  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3187  #define VMA_NULL nullptr
3188 #endif
3189 
3190 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3191 #include <cstdlib>
3192 void *aligned_alloc(size_t alignment, size_t size)
3193 {
3194  // alignment must be >= sizeof(void*)
3195  if(alignment < sizeof(void*))
3196  {
3197  alignment = sizeof(void*);
3198  }
3199 
3200  return memalign(alignment, size);
3201 }
3202 #elif defined(__APPLE__) || defined(__ANDROID__)
3203 #include <cstdlib>
3204 void *aligned_alloc(size_t alignment, size_t size)
3205 {
3206  // alignment must be >= sizeof(void*)
3207  if(alignment < sizeof(void*))
3208  {
3209  alignment = sizeof(void*);
3210  }
3211 
3212  void *pointer;
3213  if(posix_memalign(&pointer, alignment, size) == 0)
3214  return pointer;
3215  return VMA_NULL;
3216 }
3217 #endif
3218 
3219 // If your compiler is not compatible with C++11 and the definition of the
3220 // aligned_alloc() function is missing, uncommenting the following line may help:
3221 
3222 //#include <malloc.h>
3223 
3224 // Normal assert to check for programmer's errors, especially in Debug configuration.
3225 #ifndef VMA_ASSERT
3226  #ifdef _DEBUG
3227  #define VMA_ASSERT(expr) assert(expr)
3228  #else
3229  #define VMA_ASSERT(expr)
3230  #endif
3231 #endif
3232 
3233 // Assert that will be called very often, e.g. inside data structures like operator[].
3234 // Making it non-empty can make the program slow.
3235 #ifndef VMA_HEAVY_ASSERT
3236  #ifdef _DEBUG
3237  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3238  #else
3239  #define VMA_HEAVY_ASSERT(expr)
3240  #endif
3241 #endif
3242 
3243 #ifndef VMA_ALIGN_OF
3244  #define VMA_ALIGN_OF(type) (__alignof(type))
3245 #endif
3246 
3247 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3248  #if defined(_WIN32)
3249  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3250  #else
3251  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3252  #endif
3253 #endif
3254 
3255 #ifndef VMA_SYSTEM_FREE
3256  #if defined(_WIN32)
3257  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3258  #else
3259  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3260  #endif
3261 #endif
3262 
3263 #ifndef VMA_MIN
3264  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3265 #endif
3266 
3267 #ifndef VMA_MAX
3268  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3269 #endif
3270 
3271 #ifndef VMA_SWAP
3272  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3273 #endif
3274 
3275 #ifndef VMA_SORT
3276  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3277 #endif
3278 
3279 #ifndef VMA_DEBUG_LOG
3280  #define VMA_DEBUG_LOG(format, ...)
3281  /*
3282  #define VMA_DEBUG_LOG(format, ...) do { \
3283  printf(format, __VA_ARGS__); \
3284  printf("\n"); \
3285  } while(false)
3286  */
3287 #endif
3288 
3289 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3290 #if VMA_STATS_STRING_ENABLED
3291  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3292  {
3293  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3294  }
3295  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3296  {
3297  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3298  }
3299  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3300  {
3301  snprintf(outStr, strLen, "%p", ptr);
3302  }
3303 #endif
3304 
3305 #ifndef VMA_MUTEX
3306  class VmaMutex
3307  {
3308  public:
3309  void Lock() { m_Mutex.lock(); }
3310  void Unlock() { m_Mutex.unlock(); }
3311  private:
3312  std::mutex m_Mutex;
3313  };
3314  #define VMA_MUTEX VmaMutex
3315 #endif
3316 
3317 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3318 #ifndef VMA_RW_MUTEX
3319  #if VMA_USE_STL_SHARED_MUTEX
3320  // Use std::shared_mutex from C++17.
3321  #include <shared_mutex>
3322  class VmaRWMutex
3323  {
3324  public:
3325  void LockRead() { m_Mutex.lock_shared(); }
3326  void UnlockRead() { m_Mutex.unlock_shared(); }
3327  void LockWrite() { m_Mutex.lock(); }
3328  void UnlockWrite() { m_Mutex.unlock(); }
3329  private:
3330  std::shared_mutex m_Mutex;
3331  };
3332  #define VMA_RW_MUTEX VmaRWMutex
3333  #elif defined(_WIN32)
3334  // Use SRWLOCK from WinAPI.
3335  class VmaRWMutex
3336  {
3337  public:
3338  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3339  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3340  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3341  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3342  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3343  private:
3344  SRWLOCK m_Lock;
3345  };
3346  #define VMA_RW_MUTEX VmaRWMutex
3347  #else
3348  // Less efficient fallback: Use normal mutex.
3349  class VmaRWMutex
3350  {
3351  public:
3352  void LockRead() { m_Mutex.Lock(); }
3353  void UnlockRead() { m_Mutex.Unlock(); }
3354  void LockWrite() { m_Mutex.Lock(); }
3355  void UnlockWrite() { m_Mutex.Unlock(); }
3356  private:
3357  VMA_MUTEX m_Mutex;
3358  };
3359  #define VMA_RW_MUTEX VmaRWMutex
3360  #endif // #if VMA_USE_STL_SHARED_MUTEX
3361 #endif // #ifndef VMA_RW_MUTEX
3362 
3363 /*
3364 If providing your own implementation, you need to implement a subset of std::atomic:
3365 
3366 - Constructor(uint32_t desired)
3367 - uint32_t load() const
3368 - void store(uint32_t desired)
3369 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3370 */
3371 #ifndef VMA_ATOMIC_UINT32
3372  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3373 #endif
3374 
3375 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3376 
3380  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3381 #endif
3382 
3383 #ifndef VMA_DEBUG_ALIGNMENT
3384 
3388  #define VMA_DEBUG_ALIGNMENT (1)
3389 #endif
3390 
3391 #ifndef VMA_DEBUG_MARGIN
3392 
3396  #define VMA_DEBUG_MARGIN (0)
3397 #endif
3398 
3399 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3400 
3404  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3405 #endif
3406 
3407 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3408 
3413  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3414 #endif
3415 
3416 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3417 
3421  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3422 #endif
3423 
3424 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3425 
3429  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3430 #endif
3431 
3432 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3433  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3435 #endif
3436 
3437 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3438  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3440 #endif
3441 
3442 #ifndef VMA_CLASS_NO_COPY
3443  #define VMA_CLASS_NO_COPY(className) \
3444  private: \
3445  className(const className&) = delete; \
3446  className& operator=(const className&) = delete;
3447 #endif
3448 
3449 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3450 
3451 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3452 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3453 
3454 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3455 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3456 
3457 /*******************************************************************************
3458 END OF CONFIGURATION
3459 */
3460 
3461 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3462 
3463 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3464  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3465 
3466 // Returns number of bits set to 1 in (v).
3467 static inline uint32_t VmaCountBitsSet(uint32_t v)
3468 {
3469  uint32_t c = v - ((v >> 1) & 0x55555555);
3470  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3471  c = ((c >> 4) + c) & 0x0F0F0F0F;
3472  c = ((c >> 8) + c) & 0x00FF00FF;
3473  c = ((c >> 16) + c) & 0x0000FFFF;
3474  return c;
3475 }
3476 
3477 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
3478 // Use types like uint32_t, uint64_t as T.
3479 template <typename T>
3480 static inline T VmaAlignUp(T val, T align)
3481 {
3482  return (val + align - 1) / align * align;
3483 }
3484 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
3485 // Use types like uint32_t, uint64_t as T.
3486 template <typename T>
3487 static inline T VmaAlignDown(T val, T align)
3488 {
3489  return val / align * align;
3490 }
3491 
3492 // Division with mathematical rounding to nearest number.
3493 template <typename T>
3494 static inline T VmaRoundDiv(T x, T y)
3495 {
3496  return (x + (y / (T)2)) / y;
3497 }
3498 
3499 /*
3500 Returns true if given number is a power of two.
3501 T must be an unsigned integer, or a signed integer whose value is always nonnegative.
3502 For 0 returns true.
3503 */
3504 template <typename T>
3505 inline bool VmaIsPow2(T x)
3506 {
3507  return (x & (x-1)) == 0;
3508 }
3509 
3510 // Returns smallest power of 2 greater or equal to v.
3511 static inline uint32_t VmaNextPow2(uint32_t v)
3512 {
3513  v--;
3514  v |= v >> 1;
3515  v |= v >> 2;
3516  v |= v >> 4;
3517  v |= v >> 8;
3518  v |= v >> 16;
3519  v++;
3520  return v;
3521 }
3522 static inline uint64_t VmaNextPow2(uint64_t v)
3523 {
3524  v--;
3525  v |= v >> 1;
3526  v |= v >> 2;
3527  v |= v >> 4;
3528  v |= v >> 8;
3529  v |= v >> 16;
3530  v |= v >> 32;
3531  v++;
3532  return v;
3533 }
3534 
3535 // Returns largest power of 2 less or equal to v.
3536 static inline uint32_t VmaPrevPow2(uint32_t v)
3537 {
3538  v |= v >> 1;
3539  v |= v >> 2;
3540  v |= v >> 4;
3541  v |= v >> 8;
3542  v |= v >> 16;
3543  v = v ^ (v >> 1);
3544  return v;
3545 }
3546 static inline uint64_t VmaPrevPow2(uint64_t v)
3547 {
3548  v |= v >> 1;
3549  v |= v >> 2;
3550  v |= v >> 4;
3551  v |= v >> 8;
3552  v |= v >> 16;
3553  v |= v >> 32;
3554  v = v ^ (v >> 1);
3555  return v;
3556 }
3557 
3558 static inline bool VmaStrIsEmpty(const char* pStr)
3559 {
3560  return pStr == VMA_NULL || *pStr == '\0';
3561 }
3562 
3563 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3564 {
3565  switch(algorithm)
3566  {
3567  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3568  return "Linear";
3569  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3570  return "Buddy";
3571  case 0:
3572  return "Default";
3573  default:
3574  VMA_ASSERT(0);
3575  return "";
3576  }
3577 }
3578 
3579 #ifndef VMA_SORT
3580 
3581 template<typename Iterator, typename Compare>
3582 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3583 {
3584  Iterator centerValue = end; --centerValue;
3585  Iterator insertIndex = beg;
3586  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3587  {
3588  if(cmp(*memTypeIndex, *centerValue))
3589  {
3590  if(insertIndex != memTypeIndex)
3591  {
3592  VMA_SWAP(*memTypeIndex, *insertIndex);
3593  }
3594  ++insertIndex;
3595  }
3596  }
3597  if(insertIndex != centerValue)
3598  {
3599  VMA_SWAP(*insertIndex, *centerValue);
3600  }
3601  return insertIndex;
3602 }
3603 
3604 template<typename Iterator, typename Compare>
3605 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3606 {
3607  if(beg < end)
3608  {
3609  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3610  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3611  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3612  }
3613 }
3614 
3615 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3616 
3617 #endif // #ifndef VMA_SORT
3618 
3619 /*
3620 Returns true if two memory blocks occupy overlapping pages.
3621 ResourceA must be at a lower memory offset than ResourceB.
3622 
3623 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3624 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3625 */
3626 static inline bool VmaBlocksOnSamePage(
3627  VkDeviceSize resourceAOffset,
3628  VkDeviceSize resourceASize,
3629  VkDeviceSize resourceBOffset,
3630  VkDeviceSize pageSize)
3631 {
3632  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3633  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3634  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3635  VkDeviceSize resourceBStart = resourceBOffset;
3636  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3637  return resourceAEndPage == resourceBStartPage;
3638 }
3639 
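// A worked example, assuming bufferImageGranularity (pageSize) = 0x10000 (64 KiB):
// VmaBlocksOnSamePage(0, 0x8000, 0xFFFF, 0x10000) == true  - A ends at 0x7FFF and
// B starts at 0xFFFF, both within page 0.
// VmaBlocksOnSamePage(0, 0x8000, 0x10000, 0x10000) == false - B starts exactly on
// the next page boundary.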
3640 enum VmaSuballocationType
3641 {
3642  VMA_SUBALLOCATION_TYPE_FREE = 0,
3643  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3644  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3645  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3646  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3647  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3648  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3649 };
3650 
3651 /*
3652 Returns true if given suballocation types could conflict and must respect
3653 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3654 or linear image and the other is an optimal image. If a type is unknown, behave
3655 conservatively.
3656 */
3657 static inline bool VmaIsBufferImageGranularityConflict(
3658  VmaSuballocationType suballocType1,
3659  VmaSuballocationType suballocType2)
3660 {
3661  if(suballocType1 > suballocType2)
3662  {
3663  VMA_SWAP(suballocType1, suballocType2);
3664  }
3665 
3666  switch(suballocType1)
3667  {
3668  case VMA_SUBALLOCATION_TYPE_FREE:
3669  return false;
3670  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3671  return true;
3672  case VMA_SUBALLOCATION_TYPE_BUFFER:
3673  return
3674  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3675  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3676  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3677  return
3678  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3679  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3680  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3681  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3682  return
3683  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3684  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3685  return false;
3686  default:
3687  VMA_ASSERT(0);
3688  return true;
3689  }
3690 }
3691 
3692 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3693 {
3694  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3695  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3696  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3697  {
3698  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3699  }
3700 }
3701 
3702 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3703 {
3704  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3705  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3706  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3707  {
3708  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3709  {
3710  return false;
3711  }
3712  }
3713  return true;
3714 }
3715 
3716 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3717 struct VmaMutexLock
3718 {
3719  VMA_CLASS_NO_COPY(VmaMutexLock)
3720 public:
3721  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3722  m_pMutex(useMutex ? &mutex : VMA_NULL)
3723  { if(m_pMutex) { m_pMutex->Lock(); } }
3724  ~VmaMutexLock()
3725  { if(m_pMutex) { m_pMutex->Unlock(); } }
3726 private:
3727  VMA_MUTEX* m_pMutex;
3728 };
3729 
3730 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3731 struct VmaMutexLockRead
3732 {
3733  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3734 public:
3735  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3736  m_pMutex(useMutex ? &mutex : VMA_NULL)
3737  { if(m_pMutex) { m_pMutex->LockRead(); } }
3738  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3739 private:
3740  VMA_RW_MUTEX* m_pMutex;
3741 };
3742 
3743 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3744 struct VmaMutexLockWrite
3745 {
3746  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3747 public:
3748  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3749  m_pMutex(useMutex ? &mutex : VMA_NULL)
3750  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3751  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3752 private:
3753  VMA_RW_MUTEX* m_pMutex;
3754 };
3755 
3756 #if VMA_DEBUG_GLOBAL_MUTEX
3757  static VMA_MUTEX gDebugGlobalMutex;
3758  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3759 #else
3760  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3761 #endif
3762 
3763 // Minimum size of a free suballocation to register it in the free suballocation collection.
3764 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3765 
3766 /*
3767 Performs binary search and returns an iterator to the first element that is
3768 greater than or equal to (key), according to comparison (cmp).
3769 
3770 Cmp should return true if its first argument is less than its second argument.
3771 
3772 The returned iterator points to the found element if it is present in the
3773 collection, or to the place where a new element with value (key) should be inserted.
3774 */
3775 template <typename CmpLess, typename IterT, typename KeyT>
3776 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3777 {
3778  size_t down = 0, up = (end - beg);
3779  while(down < up)
3780  {
3781  const size_t mid = (down + up) / 2;
3782  if(cmp(*(beg+mid), key))
3783  {
3784  down = mid + 1;
3785  }
3786  else
3787  {
3788  up = mid;
3789  }
3790  }
3791  return beg + down;
3792 }
3793 
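// For example, over a sorted array {1, 3, 3, 7} with cmp = operator<:
// key = 3 returns an iterator to the first 3 (index 1);
// key = 4 returns an iterator to 7 (index 3), the insertion point for 4;
// key = 9 returns end.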
3794 /*
3795 Returns true if all pointers in the array are non-null and unique.
3796 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3797 T must be pointer type, e.g. VmaAllocation, VmaPool.
3798 */
3799 template<typename T>
3800 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3801 {
3802  for(uint32_t i = 0; i < count; ++i)
3803  {
3804  const T iPtr = arr[i];
3805  if(iPtr == VMA_NULL)
3806  {
3807  return false;
3808  }
3809  for(uint32_t j = i + 1; j < count; ++j)
3810  {
3811  if(iPtr == arr[j])
3812  {
3813  return false;
3814  }
3815  }
3816  }
3817  return true;
3818 }
3819 
3821 // Memory allocation
3822 
3823 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3824 {
3825  if((pAllocationCallbacks != VMA_NULL) &&
3826  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3827  {
3828  return (*pAllocationCallbacks->pfnAllocation)(
3829  pAllocationCallbacks->pUserData,
3830  size,
3831  alignment,
3832  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3833  }
3834  else
3835  {
3836  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3837  }
3838 }
3839 
3840 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3841 {
3842  if((pAllocationCallbacks != VMA_NULL) &&
3843  (pAllocationCallbacks->pfnFree != VMA_NULL))
3844  {
3845  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3846  }
3847  else
3848  {
3849  VMA_SYSTEM_FREE(ptr);
3850  }
3851 }
3852 
3853 template<typename T>
3854 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3855 {
3856  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3857 }
3858 
3859 template<typename T>
3860 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3861 {
3862  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3863 }
3864 
3865 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3866 
3867 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3868 
3869 template<typename T>
3870 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3871 {
3872  ptr->~T();
3873  VmaFree(pAllocationCallbacks, ptr);
3874 }
3875 
3876 template<typename T>
3877 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3878 {
3879  if(ptr != VMA_NULL)
3880  {
3881  for(size_t i = count; i--; )
3882  {
3883  ptr[i].~T();
3884  }
3885  VmaFree(pAllocationCallbacks, ptr);
3886  }
3887 }
3888 
3889 // STL-compatible allocator.
3890 template<typename T>
3891 class VmaStlAllocator
3892 {
3893 public:
3894  const VkAllocationCallbacks* const m_pCallbacks;
3895  typedef T value_type;
3896 
3897  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3898  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3899 
3900  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3901  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3902 
3903  template<typename U>
3904  bool operator==(const VmaStlAllocator<U>& rhs) const
3905  {
3906  return m_pCallbacks == rhs.m_pCallbacks;
3907  }
3908  template<typename U>
3909  bool operator!=(const VmaStlAllocator<U>& rhs) const
3910  {
3911  return m_pCallbacks != rhs.m_pCallbacks;
3912  }
3913 
3914  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3915 };
3916 
3917 #if VMA_USE_STL_VECTOR
3918 
3919 #define VmaVector std::vector
3920 
3921 template<typename T, typename allocatorT>
3922 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3923 {
3924  vec.insert(vec.begin() + index, item);
3925 }
3926 
3927 template<typename T, typename allocatorT>
3928 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3929 {
3930  vec.erase(vec.begin() + index);
3931 }
3932 
3933 #else // #if VMA_USE_STL_VECTOR
3934 
3935 /* Class with interface compatible with a subset of std::vector.
3936 T must be POD because constructors and destructors are not called and memcpy is
3937 used for these objects. */
3938 template<typename T, typename AllocatorT>
3939 class VmaVector
3940 {
3941 public:
3942  typedef T value_type;
3943 
3944  VmaVector(const AllocatorT& allocator) :
3945  m_Allocator(allocator),
3946  m_pArray(VMA_NULL),
3947  m_Count(0),
3948  m_Capacity(0)
3949  {
3950  }
3951 
3952  VmaVector(size_t count, const AllocatorT& allocator) :
3953  m_Allocator(allocator),
3954  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3955  m_Count(count),
3956  m_Capacity(count)
3957  {
3958  }
3959 
3960  VmaVector(const VmaVector<T, AllocatorT>& src) :
3961  m_Allocator(src.m_Allocator),
3962  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3963  m_Count(src.m_Count),
3964  m_Capacity(src.m_Count)
3965  {
3966  if(m_Count != 0)
3967  {
3968  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3969  }
3970  }
3971 
3972  ~VmaVector()
3973  {
3974  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3975  }
3976 
3977  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3978  {
3979  if(&rhs != this)
3980  {
3981  resize(rhs.m_Count);
3982  if(m_Count != 0)
3983  {
3984  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3985  }
3986  }
3987  return *this;
3988  }
3989 
3990  bool empty() const { return m_Count == 0; }
3991  size_t size() const { return m_Count; }
3992  T* data() { return m_pArray; }
3993  const T* data() const { return m_pArray; }
3994 
3995  T& operator[](size_t index)
3996  {
3997  VMA_HEAVY_ASSERT(index < m_Count);
3998  return m_pArray[index];
3999  }
4000  const T& operator[](size_t index) const
4001  {
4002  VMA_HEAVY_ASSERT(index < m_Count);
4003  return m_pArray[index];
4004  }
4005 
4006  T& front()
4007  {
4008  VMA_HEAVY_ASSERT(m_Count > 0);
4009  return m_pArray[0];
4010  }
4011  const T& front() const
4012  {
4013  VMA_HEAVY_ASSERT(m_Count > 0);
4014  return m_pArray[0];
4015  }
4016  T& back()
4017  {
4018  VMA_HEAVY_ASSERT(m_Count > 0);
4019  return m_pArray[m_Count - 1];
4020  }
4021  const T& back() const
4022  {
4023  VMA_HEAVY_ASSERT(m_Count > 0);
4024  return m_pArray[m_Count - 1];
4025  }
4026 
4027  void reserve(size_t newCapacity, bool freeMemory = false)
4028  {
4029  newCapacity = VMA_MAX(newCapacity, m_Count);
4030 
4031  if((newCapacity < m_Capacity) && !freeMemory)
4032  {
4033  newCapacity = m_Capacity;
4034  }
4035 
4036  if(newCapacity != m_Capacity)
4037  {
4038  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4039  if(m_Count != 0)
4040  {
4041  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4042  }
4043  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4044  m_Capacity = newCapacity;
4045  m_pArray = newArray;
4046  }
4047  }
4048 
4049  void resize(size_t newCount, bool freeMemory = false)
4050  {
4051  size_t newCapacity = m_Capacity;
4052  if(newCount > m_Capacity)
4053  {
4054  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4055  }
4056  else if(freeMemory)
4057  {
4058  newCapacity = newCount;
4059  }
4060 
4061  if(newCapacity != m_Capacity)
4062  {
4063  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4064  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4065  if(elementsToCopy != 0)
4066  {
4067  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4068  }
4069  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4070  m_Capacity = newCapacity;
4071  m_pArray = newArray;
4072  }
4073 
4074  m_Count = newCount;
4075  }
4076 
4077  void clear(bool freeMemory = false)
4078  {
4079  resize(0, freeMemory);
4080  }
4081 
4082  void insert(size_t index, const T& src)
4083  {
4084  VMA_HEAVY_ASSERT(index <= m_Count);
4085  const size_t oldCount = size();
4086  resize(oldCount + 1);
4087  if(index < oldCount)
4088  {
4089  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4090  }
4091  m_pArray[index] = src;
4092  }
4093 
4094  void remove(size_t index)
4095  {
4096  VMA_HEAVY_ASSERT(index < m_Count);
4097  const size_t oldCount = size();
4098  if(index < oldCount - 1)
4099  {
4100  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4101  }
4102  resize(oldCount - 1);
4103  }
4104 
4105  void push_back(const T& src)
4106  {
4107  const size_t newIndex = size();
4108  resize(newIndex + 1);
4109  m_pArray[newIndex] = src;
4110  }
4111 
4112  void pop_back()
4113  {
4114  VMA_HEAVY_ASSERT(m_Count > 0);
4115  resize(size() - 1);
4116  }
4117 
4118  void push_front(const T& src)
4119  {
4120  insert(0, src);
4121  }
4122 
4123  void pop_front()
4124  {
4125  VMA_HEAVY_ASSERT(m_Count > 0);
4126  remove(0);
4127  }
4128 
4129  typedef T* iterator;
4130 
4131  iterator begin() { return m_pArray; }
4132  iterator end() { return m_pArray + m_Count; }
4133 
4134 private:
4135  AllocatorT m_Allocator;
4136  T* m_pArray;
4137  size_t m_Count;
4138  size_t m_Capacity;
4139 };
4140 
4141 template<typename T, typename allocatorT>
4142 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4143 {
4144  vec.insert(index, item);
4145 }
4146 
4147 template<typename T, typename allocatorT>
4148 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4149 {
4150  vec.remove(index);
4151 }
4152 
4153 #endif // #if VMA_USE_STL_VECTOR
4154 
4155 template<typename CmpLess, typename VectorT>
4156 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4157 {
4158  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4159  vector.data(),
4160  vector.data() + vector.size(),
4161  value,
4162  CmpLess()) - vector.data();
4163  VmaVectorInsert(vector, indexToInsert, value);
4164  return indexToInsert;
4165 }
4166 
4167 template<typename CmpLess, typename VectorT>
4168 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4169 {
4170  CmpLess comparator;
4171  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4172  vector.begin(),
4173  vector.end(),
4174  value,
4175  comparator);
4176  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4177  {
4178  size_t indexToRemove = it - vector.begin();
4179  VmaVectorRemove(vector, indexToRemove);
4180  return true;
4181  }
4182  return false;
4183 }
4184 
4185 template<typename CmpLess, typename IterT, typename KeyT>
4186 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4187 {
4188  CmpLess comparator;
4189  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4190  beg, end, value, comparator);
4191  if(it == end ||
4192  (!comparator(*it, value) && !comparator(value, *it)))
4193  {
4194  return it;
4195  }
4196  return end;
4197 }
4198 
4200 // class VmaPoolAllocator
4201 
4202 /*
4203 Allocator for objects of type T using a list of arrays (pools) to speed up
4204 allocation. The number of elements that can be allocated is not bounded,
4205 because the allocator can create multiple blocks.
4206 */
4207 template<typename T>
4208 class VmaPoolAllocator
4209 {
4210  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4211 public:
4212  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4213  ~VmaPoolAllocator();
4214  void Clear();
4215  T* Alloc();
4216  void Free(T* ptr);
4217 
4218 private:
4219  union Item
4220  {
4221  uint32_t NextFreeIndex;
4222  T Value;
4223  };
4224 
4225  struct ItemBlock
4226  {
4227  Item* pItems;
4228  uint32_t FirstFreeIndex;
4229  };
4230 
4231  const VkAllocationCallbacks* m_pAllocationCallbacks;
4232  size_t m_ItemsPerBlock;
4233  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4234 
4235  ItemBlock& CreateNewBlock();
4236 };
4237 
4238 template<typename T>
4239 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4240  m_pAllocationCallbacks(pAllocationCallbacks),
4241  m_ItemsPerBlock(itemsPerBlock),
4242  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4243 {
4244  VMA_ASSERT(itemsPerBlock > 0);
4245 }
4246 
4247 template<typename T>
4248 VmaPoolAllocator<T>::~VmaPoolAllocator()
4249 {
4250  Clear();
4251 }
4252 
4253 template<typename T>
4254 void VmaPoolAllocator<T>::Clear()
4255 {
4256  for(size_t i = m_ItemBlocks.size(); i--; )
4257  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4258  m_ItemBlocks.clear();
4259 }
4260 
4261 template<typename T>
4262 T* VmaPoolAllocator<T>::Alloc()
4263 {
4264  for(size_t i = m_ItemBlocks.size(); i--; )
4265  {
4266  ItemBlock& block = m_ItemBlocks[i];
4267  // This block has some free items: Use first one.
4268  if(block.FirstFreeIndex != UINT32_MAX)
4269  {
4270  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4271  block.FirstFreeIndex = pItem->NextFreeIndex;
4272  return &pItem->Value;
4273  }
4274  }
4275 
4276  // No block has free item: Create new one and use it.
4277  ItemBlock& newBlock = CreateNewBlock();
4278  Item* const pItem = &newBlock.pItems[0];
4279  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4280  return &pItem->Value;
4281 }
4282 
4283 template<typename T>
4284 void VmaPoolAllocator<T>::Free(T* ptr)
4285 {
4286  // Search all memory blocks to find ptr.
4287  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4288  {
4289  ItemBlock& block = m_ItemBlocks[i];
4290 
4291  // Casting to union.
4292  Item* pItemPtr;
4293  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4294 
4295  // Check if pItemPtr is in address range of this block.
4296  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4297  {
4298  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4299  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4300  block.FirstFreeIndex = index;
4301  return;
4302  }
4303  }
4304  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4305 }
4306 
4307 template<typename T>
4308 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4309 {
4310  ItemBlock newBlock = {
4311  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4312 
4313  m_ItemBlocks.push_back(newBlock);
4314 
4315  // Setup singly-linked list of all free items in this block.
4316  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4317  newBlock.pItems[i].NextFreeIndex = i + 1;
4318  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4319  return m_ItemBlocks.back();
4320 }
4321 
4323 // class VmaRawList, VmaList
4324 
4325 #if VMA_USE_STL_LIST
4326 
4327 #define VmaList std::list
4328 
4329 #else // #if VMA_USE_STL_LIST
4330 
4331 template<typename T>
4332 struct VmaListItem
4333 {
4334  VmaListItem* pPrev;
4335  VmaListItem* pNext;
4336  T Value;
4337 };
4338 
4339 // Doubly linked list.
4340 template<typename T>
4341 class VmaRawList
4342 {
4343  VMA_CLASS_NO_COPY(VmaRawList)
4344 public:
4345  typedef VmaListItem<T> ItemType;
4346 
4347  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4348  ~VmaRawList();
4349  void Clear();
4350 
4351  size_t GetCount() const { return m_Count; }
4352  bool IsEmpty() const { return m_Count == 0; }
4353 
4354  ItemType* Front() { return m_pFront; }
4355  const ItemType* Front() const { return m_pFront; }
4356  ItemType* Back() { return m_pBack; }
4357  const ItemType* Back() const { return m_pBack; }
4358 
4359  ItemType* PushBack();
4360  ItemType* PushFront();
4361  ItemType* PushBack(const T& value);
4362  ItemType* PushFront(const T& value);
4363  void PopBack();
4364  void PopFront();
4365 
4366  // Item can be null - it means PushBack.
4367  ItemType* InsertBefore(ItemType* pItem);
4368  // Item can be null - it means PushFront.
4369  ItemType* InsertAfter(ItemType* pItem);
4370 
4371  ItemType* InsertBefore(ItemType* pItem, const T& value);
4372  ItemType* InsertAfter(ItemType* pItem, const T& value);
4373 
4374  void Remove(ItemType* pItem);
4375 
4376 private:
4377  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4378  VmaPoolAllocator<ItemType> m_ItemAllocator;
4379  ItemType* m_pFront;
4380  ItemType* m_pBack;
4381  size_t m_Count;
4382 };
4383 
4384 template<typename T>
4385 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4386  m_pAllocationCallbacks(pAllocationCallbacks),
4387  m_ItemAllocator(pAllocationCallbacks, 128),
4388  m_pFront(VMA_NULL),
4389  m_pBack(VMA_NULL),
4390  m_Count(0)
4391 {
4392 }
4393 
4394 template<typename T>
4395 VmaRawList<T>::~VmaRawList()
4396 {
4397  // Intentionally not calling Clear, because that would waste computation
4398  // returning all items to m_ItemAllocator as free just before destruction.
4399 }
4400 
4401 template<typename T>
4402 void VmaRawList<T>::Clear()
4403 {
4404  if(IsEmpty() == false)
4405  {
4406  ItemType* pItem = m_pBack;
4407  while(pItem != VMA_NULL)
4408  {
4409  ItemType* const pPrevItem = pItem->pPrev;
4410  m_ItemAllocator.Free(pItem);
4411  pItem = pPrevItem;
4412  }
4413  m_pFront = VMA_NULL;
4414  m_pBack = VMA_NULL;
4415  m_Count = 0;
4416  }
4417 }
4418 
4419 template<typename T>
4420 VmaListItem<T>* VmaRawList<T>::PushBack()
4421 {
4422  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4423  pNewItem->pNext = VMA_NULL;
4424  if(IsEmpty())
4425  {
4426  pNewItem->pPrev = VMA_NULL;
4427  m_pFront = pNewItem;
4428  m_pBack = pNewItem;
4429  m_Count = 1;
4430  }
4431  else
4432  {
4433  pNewItem->pPrev = m_pBack;
4434  m_pBack->pNext = pNewItem;
4435  m_pBack = pNewItem;
4436  ++m_Count;
4437  }
4438  return pNewItem;
4439 }
4440 
4441 template<typename T>
4442 VmaListItem<T>* VmaRawList<T>::PushFront()
4443 {
4444  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4445  pNewItem->pPrev = VMA_NULL;
4446  if(IsEmpty())
4447  {
4448  pNewItem->pNext = VMA_NULL;
4449  m_pFront = pNewItem;
4450  m_pBack = pNewItem;
4451  m_Count = 1;
4452  }
4453  else
4454  {
4455  pNewItem->pNext = m_pFront;
4456  m_pFront->pPrev = pNewItem;
4457  m_pFront = pNewItem;
4458  ++m_Count;
4459  }
4460  return pNewItem;
4461 }
4462 
4463 template<typename T>
4464 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4465 {
4466  ItemType* const pNewItem = PushBack();
4467  pNewItem->Value = value;
4468  return pNewItem;
4469 }
4470 
4471 template<typename T>
4472 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4473 {
4474  ItemType* const pNewItem = PushFront();
4475  pNewItem->Value = value;
4476  return pNewItem;
4477 }
4478 
4479 template<typename T>
4480 void VmaRawList<T>::PopBack()
4481 {
4482  VMA_HEAVY_ASSERT(m_Count > 0);
4483  ItemType* const pBackItem = m_pBack;
4484  ItemType* const pPrevItem = pBackItem->pPrev;
4485  if(pPrevItem != VMA_NULL)
4486  {
4487  pPrevItem->pNext = VMA_NULL;
4488  }
4489  m_pBack = pPrevItem;
4490  m_ItemAllocator.Free(pBackItem);
4491  --m_Count;
4492 }
4493 
4494 template<typename T>
4495 void VmaRawList<T>::PopFront()
4496 {
4497  VMA_HEAVY_ASSERT(m_Count > 0);
4498  ItemType* const pFrontItem = m_pFront;
4499  ItemType* const pNextItem = pFrontItem->pNext;
4500  if(pNextItem != VMA_NULL)
4501  {
4502  pNextItem->pPrev = VMA_NULL;
4503  }
4504  m_pFront = pNextItem;
4505  m_ItemAllocator.Free(pFrontItem);
4506  --m_Count;
4507 }
4508 
4509 template<typename T>
4510 void VmaRawList<T>::Remove(ItemType* pItem)
4511 {
4512  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4513  VMA_HEAVY_ASSERT(m_Count > 0);
4514 
4515  if(pItem->pPrev != VMA_NULL)
4516  {
4517  pItem->pPrev->pNext = pItem->pNext;
4518  }
4519  else
4520  {
4521  VMA_HEAVY_ASSERT(m_pFront == pItem);
4522  m_pFront = pItem->pNext;
4523  }
4524 
4525  if(pItem->pNext != VMA_NULL)
4526  {
4527  pItem->pNext->pPrev = pItem->pPrev;
4528  }
4529  else
4530  {
4531  VMA_HEAVY_ASSERT(m_pBack == pItem);
4532  m_pBack = pItem->pPrev;
4533  }
4534 
4535  m_ItemAllocator.Free(pItem);
4536  --m_Count;
4537 }
4538 
4539 template<typename T>
4540 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4541 {
4542  if(pItem != VMA_NULL)
4543  {
4544  ItemType* const prevItem = pItem->pPrev;
4545  ItemType* const newItem = m_ItemAllocator.Alloc();
4546  newItem->pPrev = prevItem;
4547  newItem->pNext = pItem;
4548  pItem->pPrev = newItem;
4549  if(prevItem != VMA_NULL)
4550  {
4551  prevItem->pNext = newItem;
4552  }
4553  else
4554  {
4555  VMA_HEAVY_ASSERT(m_pFront == pItem);
4556  m_pFront = newItem;
4557  }
4558  ++m_Count;
4559  return newItem;
4560  }
4561  else
4562  return PushBack();
4563 }
4564 
4565 template<typename T>
4566 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4567 {
4568  if(pItem != VMA_NULL)
4569  {
4570  ItemType* const nextItem = pItem->pNext;
4571  ItemType* const newItem = m_ItemAllocator.Alloc();
4572  newItem->pNext = nextItem;
4573  newItem->pPrev = pItem;
4574  pItem->pNext = newItem;
4575  if(nextItem != VMA_NULL)
4576  {
4577  nextItem->pPrev = newItem;
4578  }
4579  else
4580  {
4581  VMA_HEAVY_ASSERT(m_pBack == pItem);
4582  m_pBack = newItem;
4583  }
4584  ++m_Count;
4585  return newItem;
4586  }
4587  else
4588  return PushFront();
4589 }
4590 
4591 template<typename T>
4592 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4593 {
4594  ItemType* const newItem = InsertBefore(pItem);
4595  newItem->Value = value;
4596  return newItem;
4597 }
4598 
4599 template<typename T>
4600 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4601 {
4602  ItemType* const newItem = InsertAfter(pItem);
4603  newItem->Value = value;
4604  return newItem;
4605 }
4606 
4607 template<typename T, typename AllocatorT>
4608 class VmaList
4609 {
4610  VMA_CLASS_NO_COPY(VmaList)
4611 public:
4612  class iterator
4613  {
4614  public:
4615  iterator() :
4616  m_pList(VMA_NULL),
4617  m_pItem(VMA_NULL)
4618  {
4619  }
4620 
4621  T& operator*() const
4622  {
4623  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4624  return m_pItem->Value;
4625  }
4626  T* operator->() const
4627  {
4628  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4629  return &m_pItem->Value;
4630  }
4631 
4632  iterator& operator++()
4633  {
4634  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4635  m_pItem = m_pItem->pNext;
4636  return *this;
4637  }
4638  iterator& operator--()
4639  {
4640  if(m_pItem != VMA_NULL)
4641  {
4642  m_pItem = m_pItem->pPrev;
4643  }
4644  else
4645  {
4646  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4647  m_pItem = m_pList->Back();
4648  }
4649  return *this;
4650  }
4651 
4652  iterator operator++(int)
4653  {
4654  iterator result = *this;
4655  ++*this;
4656  return result;
4657  }
4658  iterator operator--(int)
4659  {
4660  iterator result = *this;
4661  --*this;
4662  return result;
4663  }
4664 
4665  bool operator==(const iterator& rhs) const
4666  {
4667  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4668  return m_pItem == rhs.m_pItem;
4669  }
4670  bool operator!=(const iterator& rhs) const
4671  {
4672  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4673  return m_pItem != rhs.m_pItem;
4674  }
4675 
4676  private:
4677  VmaRawList<T>* m_pList;
4678  VmaListItem<T>* m_pItem;
4679 
4680  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4681  m_pList(pList),
4682  m_pItem(pItem)
4683  {
4684  }
4685 
4686  friend class VmaList<T, AllocatorT>;
4687  };
4688 
4689  class const_iterator
4690  {
4691  public:
4692  const_iterator() :
4693  m_pList(VMA_NULL),
4694  m_pItem(VMA_NULL)
4695  {
4696  }
4697 
4698  const_iterator(const iterator& src) :
4699  m_pList(src.m_pList),
4700  m_pItem(src.m_pItem)
4701  {
4702  }
4703 
4704  const T& operator*() const
4705  {
4706  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4707  return m_pItem->Value;
4708  }
4709  const T* operator->() const
4710  {
4711  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4712  return &m_pItem->Value;
4713  }
4714 
4715  const_iterator& operator++()
4716  {
4717  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4718  m_pItem = m_pItem->pNext;
4719  return *this;
4720  }
4721  const_iterator& operator--()
4722  {
4723  if(m_pItem != VMA_NULL)
4724  {
4725  m_pItem = m_pItem->pPrev;
4726  }
4727  else
4728  {
4729  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4730  m_pItem = m_pList->Back();
4731  }
4732  return *this;
4733  }
4734 
4735  const_iterator operator++(int)
4736  {
4737  const_iterator result = *this;
4738  ++*this;
4739  return result;
4740  }
4741  const_iterator operator--(int)
4742  {
4743  const_iterator result = *this;
4744  --*this;
4745  return result;
4746  }
4747 
4748  bool operator==(const const_iterator& rhs) const
4749  {
4750  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4751  return m_pItem == rhs.m_pItem;
4752  }
4753  bool operator!=(const const_iterator& rhs) const
4754  {
4755  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4756  return m_pItem != rhs.m_pItem;
4757  }
4758 
4759  private:
4760  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4761  m_pList(pList),
4762  m_pItem(pItem)
4763  {
4764  }
4765 
4766  const VmaRawList<T>* m_pList;
4767  const VmaListItem<T>* m_pItem;
4768 
4769  friend class VmaList<T, AllocatorT>;
4770  };
4771 
4772  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4773 
4774  bool empty() const { return m_RawList.IsEmpty(); }
4775  size_t size() const { return m_RawList.GetCount(); }
4776 
4777  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4778  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4779 
4780  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4781  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4782 
4783  void clear() { m_RawList.Clear(); }
4784  void push_back(const T& value) { m_RawList.PushBack(value); }
4785  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4786  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4787 
4788 private:
4789  VmaRawList<T> m_RawList;
4790 };
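#if 0
// A minimal usage sketch (illustration only, not part of the original header)
// of the VmaList wrapper above, mirroring the std::list subset it exposes.
// It assumes VmaStlAllocator is constructible from a VkAllocationCallbacks
// pointer, as it is used elsewhere in this file; `callbacks` may be null.
static void VmaListUsageSketch(const VkAllocationCallbacks* callbacks)
{
    VmaStlAllocator<int> alloc(callbacks);
    VmaList<int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.push_back(3);
    // insert() adds before the given iterator, like std::list::insert.
    VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin();
    ++it;
    list.insert(it, 2);       // List is now 1, 2, 3.
    list.erase(list.begin()); // List is now 2, 3.
}
#endif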
4791 
4792 #endif // #if VMA_USE_STL_LIST
4793 
4794 ////////////////////////////////////////////////////////////////////////////////
4795 // class VmaMap
4796 
4797 // Unused in this version.
4798 #if 0
4799 
4800 #if VMA_USE_STL_UNORDERED_MAP
4801 
4802 #define VmaPair std::pair
4803 
4804 #define VMA_MAP_TYPE(KeyT, ValueT) \
4805  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4806 
4807 #else // #if VMA_USE_STL_UNORDERED_MAP
4808 
4809 template<typename T1, typename T2>
4810 struct VmaPair
4811 {
4812  T1 first;
4813  T2 second;
4814 
4815  VmaPair() : first(), second() { }
4816  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4817 };
4818 
4819 /* Class compatible with a subset of the interface of std::unordered_map.
4820 KeyT, ValueT must be POD because they will be stored in VmaVector.
4821 */
4822 template<typename KeyT, typename ValueT>
4823 class VmaMap
4824 {
4825 public:
4826  typedef VmaPair<KeyT, ValueT> PairType;
4827  typedef PairType* iterator;
4828 
4829  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4830 
4831  iterator begin() { return m_Vector.begin(); }
4832  iterator end() { return m_Vector.end(); }
4833 
4834  void insert(const PairType& pair);
4835  iterator find(const KeyT& key);
4836  void erase(iterator it);
4837 
4838 private:
4839  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4840 };
4841 
4842 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4843 
4844 template<typename FirstT, typename SecondT>
4845 struct VmaPairFirstLess
4846 {
4847  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4848  {
4849  return lhs.first < rhs.first;
4850  }
4851  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4852  {
4853  return lhs.first < rhsFirst;
4854  }
4855 };
4856 
4857 template<typename KeyT, typename ValueT>
4858 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4859 {
4860  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4861  m_Vector.data(),
4862  m_Vector.data() + m_Vector.size(),
4863  pair,
4864  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4865  VmaVectorInsert(m_Vector, indexToInsert, pair);
4866 }
4867 
4868 template<typename KeyT, typename ValueT>
4869 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4870 {
4871  PairType* it = VmaBinaryFindFirstNotLess(
4872  m_Vector.data(),
4873  m_Vector.data() + m_Vector.size(),
4874  key,
4875  VmaPairFirstLess<KeyT, ValueT>());
4876  if((it != m_Vector.end()) && (it->first == key))
4877  {
4878  return it;
4879  }
4880  else
4881  {
4882  return m_Vector.end();
4883  }
4884 }
4885 
4886 template<typename KeyT, typename ValueT>
4887 void VmaMap<KeyT, ValueT>::erase(iterator it)
4888 {
4889  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4890 }
4891 
4892 #endif // #if VMA_USE_STL_UNORDERED_MAP
4893 
4894 #endif // #if 0
4895 
4896 ////////////////////////////////////////////////////////////////////////////////
4897 
4898 class VmaDeviceMemoryBlock;
4899 
4900 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4901 
4902 struct VmaAllocation_T
4903 {
4904  VMA_CLASS_NO_COPY(VmaAllocation_T)
4905 private:
4906  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4907 
4908  enum FLAGS
4909  {
4910  FLAG_USER_DATA_STRING = 0x01,
4911  };
4912 
4913 public:
4914  enum ALLOCATION_TYPE
4915  {
4916  ALLOCATION_TYPE_NONE,
4917  ALLOCATION_TYPE_BLOCK,
4918  ALLOCATION_TYPE_DEDICATED,
4919  };
4920 
4921  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4922  m_Alignment(1),
4923  m_Size(0),
4924  m_pUserData(VMA_NULL),
4925  m_LastUseFrameIndex(currentFrameIndex),
4926  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4927  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4928  m_MapCount(0),
4929  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4930  {
4931 #if VMA_STATS_STRING_ENABLED
4932  m_CreationFrameIndex = currentFrameIndex;
4933  m_BufferImageUsage = 0;
4934 #endif
4935  }
4936 
4937  ~VmaAllocation_T()
4938  {
4939  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4940 
4941  // Check if owned string was freed.
4942  VMA_ASSERT(m_pUserData == VMA_NULL);
4943  }
4944 
4945  void InitBlockAllocation(
4946  VmaPool hPool,
4947  VmaDeviceMemoryBlock* block,
4948  VkDeviceSize offset,
4949  VkDeviceSize alignment,
4950  VkDeviceSize size,
4951  VmaSuballocationType suballocationType,
4952  bool mapped,
4953  bool canBecomeLost)
4954  {
4955  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4956  VMA_ASSERT(block != VMA_NULL);
4957  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4958  m_Alignment = alignment;
4959  m_Size = size;
4960  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4961  m_SuballocationType = (uint8_t)suballocationType;
4962  m_BlockAllocation.m_hPool = hPool;
4963  m_BlockAllocation.m_Block = block;
4964  m_BlockAllocation.m_Offset = offset;
4965  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4966  }
4967 
4968  void InitLost()
4969  {
4970  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4971  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4972  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4973  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4974  m_BlockAllocation.m_Block = VMA_NULL;
4975  m_BlockAllocation.m_Offset = 0;
4976  m_BlockAllocation.m_CanBecomeLost = true;
4977  }
4978 
4979  void ChangeBlockAllocation(
4980  VmaAllocator hAllocator,
4981  VmaDeviceMemoryBlock* block,
4982  VkDeviceSize offset);
4983 
4984  void ChangeSize(VkDeviceSize newSize);
4985  void ChangeOffset(VkDeviceSize newOffset);
4986 
4987  // A non-null pMappedData means the allocation is created with the MAPPED flag.
4988  void InitDedicatedAllocation(
4989  uint32_t memoryTypeIndex,
4990  VkDeviceMemory hMemory,
4991  VmaSuballocationType suballocationType,
4992  void* pMappedData,
4993  VkDeviceSize size)
4994  {
4995  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4996  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4997  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4998  m_Alignment = 0;
4999  m_Size = size;
5000  m_SuballocationType = (uint8_t)suballocationType;
5001  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5002  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5003  m_DedicatedAllocation.m_hMemory = hMemory;
5004  m_DedicatedAllocation.m_pMappedData = pMappedData;
5005  }
5006 
5007  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5008  VkDeviceSize GetAlignment() const { return m_Alignment; }
5009  VkDeviceSize GetSize() const { return m_Size; }
5010  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5011  void* GetUserData() const { return m_pUserData; }
5012  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5013  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5014 
5015  VmaDeviceMemoryBlock* GetBlock() const
5016  {
5017  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5018  return m_BlockAllocation.m_Block;
5019  }
5020  VkDeviceSize GetOffset() const;
5021  VkDeviceMemory GetMemory() const;
5022  uint32_t GetMemoryTypeIndex() const;
5023  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5024  void* GetMappedData() const;
5025  bool CanBecomeLost() const;
5026  VmaPool GetPool() const;
5027 
5028  uint32_t GetLastUseFrameIndex() const
5029  {
5030  return m_LastUseFrameIndex.load();
5031  }
5032  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5033  {
5034  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5035  }
5036  /*
5037  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5038  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5039  - Else, returns false.
5040 
5041  If hAllocation is already lost, this function asserts - you should not call it in that case.
5042  If hAllocation was not created with CAN_BECOME_LOST_BIT, it also asserts.
5043  */
5044  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5045 
5046  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5047  {
5048  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5049  outInfo.blockCount = 1;
5050  outInfo.allocationCount = 1;
5051  outInfo.unusedRangeCount = 0;
5052  outInfo.usedBytes = m_Size;
5053  outInfo.unusedBytes = 0;
5054  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5055  outInfo.unusedRangeSizeMin = UINT64_MAX;
5056  outInfo.unusedRangeSizeMax = 0;
5057  }
5058 
5059  void BlockAllocMap();
5060  void BlockAllocUnmap();
5061  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5062  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5063 
5064 #if VMA_STATS_STRING_ENABLED
5065  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5066  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5067 
5068  void InitBufferImageUsage(uint32_t bufferImageUsage)
5069  {
5070  VMA_ASSERT(m_BufferImageUsage == 0);
5071  m_BufferImageUsage = bufferImageUsage;
5072  }
5073 
5074  void PrintParameters(class VmaJsonWriter& json) const;
5075 #endif
5076 
5077 private:
5078  VkDeviceSize m_Alignment;
5079  VkDeviceSize m_Size;
5080  void* m_pUserData;
5081  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5082  uint8_t m_Type; // ALLOCATION_TYPE
5083  uint8_t m_SuballocationType; // VmaSuballocationType
5084  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5085  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5086  uint8_t m_MapCount;
5087  uint8_t m_Flags; // enum FLAGS
5088 
5089  // Allocation out of VmaDeviceMemoryBlock.
5090  struct BlockAllocation
5091  {
5092  VmaPool m_hPool; // Null if belongs to general memory.
5093  VmaDeviceMemoryBlock* m_Block;
5094  VkDeviceSize m_Offset;
5095  bool m_CanBecomeLost;
5096  };
5097 
5098  // Allocation for an object that has its own private VkDeviceMemory.
5099  struct DedicatedAllocation
5100  {
5101  uint32_t m_MemoryTypeIndex;
5102  VkDeviceMemory m_hMemory;
5103  void* m_pMappedData; // Not null means memory is mapped.
5104  };
5105 
5106  union
5107  {
5108  // Allocation out of VmaDeviceMemoryBlock.
5109  BlockAllocation m_BlockAllocation;
5110  // Allocation for an object that has its own private VkDeviceMemory.
5111  DedicatedAllocation m_DedicatedAllocation;
5112  };
5113 
5114 #if VMA_STATS_STRING_ENABLED
5115  uint32_t m_CreationFrameIndex;
5116  uint32_t m_BufferImageUsage; // 0 if unknown.
5117 #endif
5118 
5119  void FreeUserDataString(VmaAllocator hAllocator);
5120 };
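#if 0
// A minimal sketch (illustration only) of how the packed m_MapCount byte
// described above decodes; the value used here is hypothetical.
static void MapCountDecodingSketch()
{
    const uint8_t mapCount = 0x82; // Persistent-map bit set, plus 2 user map references.
    const bool persistentlyMapped = (mapCount & 0x80) != 0; // true
    const uint8_t userMapReferences = mapCount & 0x7F;      // 2
    (void)persistentlyMapped;
    (void)userMapReferences;
}
#endif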
5121 
5122 /*
5123 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
5124 allocation and returned as an allocated memory block, or free.
5125 */
5126 struct VmaSuballocation
5127 {
5128  VkDeviceSize offset;
5129  VkDeviceSize size;
5130  VmaAllocation hAllocation;
5131  VmaSuballocationType type;
5132 };
5133 
5134 // Comparator for offsets.
5135 struct VmaSuballocationOffsetLess
5136 {
5137  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5138  {
5139  return lhs.offset < rhs.offset;
5140  }
5141 };
5142 struct VmaSuballocationOffsetGreater
5143 {
5144  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5145  {
5146  return lhs.offset > rhs.offset;
5147  }
5148 };
5149 
5150 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5151 
5152 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5153 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5154 
5155 /*
5156 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5157 
5158 If canMakeOtherLost was false:
5159 - item points to a FREE suballocation.
5160 - itemsToMakeLostCount is 0.
5161 
5162 If canMakeOtherLost was true:
5163 - item points to the first of a sequence of suballocations, which are either FREE,
5164  or point to VmaAllocations that can become lost.
5165 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5166  the requested allocation to succeed.
5167 */
5168 struct VmaAllocationRequest
5169 {
5170  VkDeviceSize offset;
5171  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5172  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5173  VmaSuballocationList::iterator item;
5174  size_t itemsToMakeLostCount;
5175  void* customData;
5176 
5177  VkDeviceSize CalcCost() const
5178  {
5179  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5180  }
5181 };
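#if 0
// A worked example (illustration only) of VmaAllocationRequest::CalcCost():
// each allocation sacrificed is penalized at VMA_LOST_ALLOCATION_COST, so a
// request that overlaps 262144 bytes of to-be-lost items across 2 allocations
// costs 262144 + 2 * 1048576 = 2359296 "equivalent bytes". Cheaper requests
// sacrifice fewer live allocations.
static VkDeviceSize AllocationRequestCostSketch()
{
    VmaAllocationRequest request;
    request.sumItemSize = 262144;
    request.itemsToMakeLostCount = 2;
    return request.CalcCost(); // == 2359296
}
#endif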
5182 
5183 /*
5184 Data structure used for bookkeeping of allocations and unused ranges of memory
5185 in a single VkDeviceMemory block.
5186 */
5187 class VmaBlockMetadata
5188 {
5189 public:
5190  VmaBlockMetadata(VmaAllocator hAllocator);
5191  virtual ~VmaBlockMetadata() { }
5192  virtual void Init(VkDeviceSize size) { m_Size = size; }
5193 
5194  // Validates all data structures inside this object. If not valid, returns false.
5195  virtual bool Validate() const = 0;
5196  VkDeviceSize GetSize() const { return m_Size; }
5197  virtual size_t GetAllocationCount() const = 0;
5198  virtual VkDeviceSize GetSumFreeSize() const = 0;
5199  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5200  // Returns true if this block is empty - contains only a single free suballocation.
5201  virtual bool IsEmpty() const = 0;
5202 
5203  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5204  // Shouldn't modify blockCount.
5205  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5206 
5207 #if VMA_STATS_STRING_ENABLED
5208  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5209 #endif
5210 
5211  // Tries to find a place for suballocation with given parameters inside this block.
5212  // If succeeded, fills pAllocationRequest and returns true.
5213  // If failed, returns false.
5214  virtual bool CreateAllocationRequest(
5215  uint32_t currentFrameIndex,
5216  uint32_t frameInUseCount,
5217  VkDeviceSize bufferImageGranularity,
5218  VkDeviceSize allocSize,
5219  VkDeviceSize allocAlignment,
5220  bool upperAddress,
5221  VmaSuballocationType allocType,
5222  bool canMakeOtherLost,
5223  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5224  uint32_t strategy,
5225  VmaAllocationRequest* pAllocationRequest) = 0;
5226 
5227  virtual bool MakeRequestedAllocationsLost(
5228  uint32_t currentFrameIndex,
5229  uint32_t frameInUseCount,
5230  VmaAllocationRequest* pAllocationRequest) = 0;
5231 
5232  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5233 
5234  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5235 
5236  // Makes actual allocation based on request. Request must already be checked and valid.
5237  virtual void Alloc(
5238  const VmaAllocationRequest& request,
5239  VmaSuballocationType type,
5240  VkDeviceSize allocSize,
5241  bool upperAddress,
5242  VmaAllocation hAllocation) = 0;
5243 
5244  // Frees suballocation assigned to given memory region.
5245  virtual void Free(const VmaAllocation allocation) = 0;
5246  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5247 
5248  // Tries to resize (grow or shrink) space for given allocation, in place.
5249  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5250 
5251 protected:
5252  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5253 
5254 #if VMA_STATS_STRING_ENABLED
5255  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5256  VkDeviceSize unusedBytes,
5257  size_t allocationCount,
5258  size_t unusedRangeCount) const;
5259  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5260  VkDeviceSize offset,
5261  VmaAllocation hAllocation) const;
5262  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5263  VkDeviceSize offset,
5264  VkDeviceSize size) const;
5265  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5266 #endif
5267 
5268 private:
5269  VkDeviceSize m_Size;
5270  const VkAllocationCallbacks* m_pAllocationCallbacks;
5271 };
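#if 0
// A minimal sketch (illustration only) of the two-phase protocol that callers
// of VmaBlockMetadata follow: CreateAllocationRequest() finds a place, then
// Alloc() commits the already-validated request. Parameter values here are
// hypothetical; when canMakeOtherLost were true, MakeRequestedAllocationsLost()
// would have to succeed between the two phases.
static bool BlockMetadataAllocSketch(
    VmaBlockMetadata* pMetadata,
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaSuballocationType type,
    VmaAllocation hAllocation)
{
    VmaAllocationRequest request;
    if(!pMetadata->CreateAllocationRequest(
        currentFrameIndex, frameInUseCount,
        1,      // bufferImageGranularity
        size, alignment,
        false,  // upperAddress
        type,
        false,  // canMakeOtherLost
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
        &request))
    {
        return false; // This block cannot satisfy the request.
    }
    pMetadata->Alloc(request, type, size, false, hAllocation);
    return true;
}
#endif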
5272 
5273 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5274  VMA_ASSERT(0 && "Validation failed: " #cond); \
5275  return false; \
5276  } } while(false)
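#if 0
// A minimal sketch (illustration only) of how VMA_VALIDATE is used inside
// Validate() implementations: on failure it asserts and makes the enclosing
// function return false. `MyMetadata` is a hypothetical subclass.
bool MyMetadata::Validate() const
{
    VMA_VALIDATE(GetSumFreeSize() <= GetSize());
    return true;
}
#endif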
5277 
5278 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5279 {
5280  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5281 public:
5282  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5283  virtual ~VmaBlockMetadata_Generic();
5284  virtual void Init(VkDeviceSize size);
5285 
5286  virtual bool Validate() const;
5287  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5288  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5289  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5290  virtual bool IsEmpty() const;
5291 
5292  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5293  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5294 
5295 #if VMA_STATS_STRING_ENABLED
5296  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5297 #endif
5298 
5299  virtual bool CreateAllocationRequest(
5300  uint32_t currentFrameIndex,
5301  uint32_t frameInUseCount,
5302  VkDeviceSize bufferImageGranularity,
5303  VkDeviceSize allocSize,
5304  VkDeviceSize allocAlignment,
5305  bool upperAddress,
5306  VmaSuballocationType allocType,
5307  bool canMakeOtherLost,
5308  uint32_t strategy,
5309  VmaAllocationRequest* pAllocationRequest);
5310 
5311  virtual bool MakeRequestedAllocationsLost(
5312  uint32_t currentFrameIndex,
5313  uint32_t frameInUseCount,
5314  VmaAllocationRequest* pAllocationRequest);
5315 
5316  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5317 
5318  virtual VkResult CheckCorruption(const void* pBlockData);
5319 
5320  virtual void Alloc(
5321  const VmaAllocationRequest& request,
5322  VmaSuballocationType type,
5323  VkDeviceSize allocSize,
5324  bool upperAddress,
5325  VmaAllocation hAllocation);
5326 
5327  virtual void Free(const VmaAllocation allocation);
5328  virtual void FreeAtOffset(VkDeviceSize offset);
5329 
5330  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5331 
5332  ////////////////////////////////////////////////////////////////////////////////
5333  // For defragmentation
5334 
5335  bool IsBufferImageGranularityConflictPossible(
5336  VkDeviceSize bufferImageGranularity,
5337  VmaSuballocationType& inOutPrevSuballocType) const;
5338 
5339 private:
5340  friend class VmaDefragmentationAlgorithm_Generic;
5341  friend class VmaDefragmentationAlgorithm_Fast;
5342 
5343  uint32_t m_FreeCount;
5344  VkDeviceSize m_SumFreeSize;
5345  VmaSuballocationList m_Suballocations;
5346  // Suballocations that are free and have size greater than a certain threshold.
5347  // Sorted by size, ascending.
5348  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5349 
5350  bool ValidateFreeSuballocationList() const;
5351 
5352  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
5353  // If yes, fills pOffset and returns true. If no, returns false.
5354  bool CheckAllocation(
5355  uint32_t currentFrameIndex,
5356  uint32_t frameInUseCount,
5357  VkDeviceSize bufferImageGranularity,
5358  VkDeviceSize allocSize,
5359  VkDeviceSize allocAlignment,
5360  VmaSuballocationType allocType,
5361  VmaSuballocationList::const_iterator suballocItem,
5362  bool canMakeOtherLost,
5363  VkDeviceSize* pOffset,
5364  size_t* itemsToMakeLostCount,
5365  VkDeviceSize* pSumFreeSize,
5366  VkDeviceSize* pSumItemSize) const;
5367  // Given a free suballocation, merges it with the following one, which must also be free.
5368  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5369  // Releases given suballocation, making it free.
5370  // Merges it with adjacent free suballocations if applicable.
5371  // Returns iterator to new free suballocation at this place.
5372  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5373  // Given a free suballocation, inserts it into the sorted list
5374  // m_FreeSuballocationsBySize, if it is suitable.
5375  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5376  // Given a free suballocation, removes it from the sorted list
5377  // m_FreeSuballocationsBySize, if it is suitable.
5378  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5379 };
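#if 0
// A minimal sketch (illustration only) of why m_FreeSuballocationsBySize is
// kept sorted by size, ascending: a best-fit lookup becomes a lower-bound
// binary search via VmaBinaryFindFirstNotLess (used elsewhere in this file)
// instead of a linear scan. Shown here over a plain array of sizes; the real
// list stores suballocation iterators, compared by their sizes.
struct SizeLessSketch
{
    bool operator()(VkDeviceSize lhs, VkDeviceSize rhs) const { return lhs < rhs; }
};
static size_t BestFitIndexSketch(const VkDeviceSize* sizesAscending, size_t count, VkDeviceSize requiredSize)
{
    // First free range whose size is >= requiredSize; returns count if none fits.
    const VkDeviceSize* const it = VmaBinaryFindFirstNotLess(
        sizesAscending, sizesAscending + count, requiredSize, SizeLessSketch());
    return (size_t)(it - sizesAscending);
}
#endif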
5380 
5381 /*
5382 Allocations and their references in internal data structure look like this:
5383 
5384 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5385 
5386  0 +-------+
5387  | |
5388  | |
5389  | |
5390  +-------+
5391  | Alloc | 1st[m_1stNullItemsBeginCount]
5392  +-------+
5393  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5394  +-------+
5395  | ... |
5396  +-------+
5397  | Alloc | 1st[1st.size() - 1]
5398  +-------+
5399  | |
5400  | |
5401  | |
5402 GetSize() +-------+
5403 
5404 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5405 
5406  0 +-------+
5407  | Alloc | 2nd[0]
5408  +-------+
5409  | Alloc | 2nd[1]
5410  +-------+
5411  | ... |
5412  +-------+
5413  | Alloc | 2nd[2nd.size() - 1]
5414  +-------+
5415  | |
5416  | |
5417  | |
5418  +-------+
5419  | Alloc | 1st[m_1stNullItemsBeginCount]
5420  +-------+
5421  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5422  +-------+
5423  | ... |
5424  +-------+
5425  | Alloc | 1st[1st.size() - 1]
5426  +-------+
5427  | |
5428 GetSize() +-------+
5429 
5430 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5431 
5432  0 +-------+
5433  | |
5434  | |
5435  | |
5436  +-------+
5437  | Alloc | 1st[m_1stNullItemsBeginCount]
5438  +-------+
5439  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5440  +-------+
5441  | ... |
5442  +-------+
5443  | Alloc | 1st[1st.size() - 1]
5444  +-------+
5445  | |
5446  | |
5447  | |
5448  +-------+
5449  | Alloc | 2nd[2nd.size() - 1]
5450  +-------+
5451  | ... |
5452  +-------+
5453  | Alloc | 2nd[1]
5454  +-------+
5455  | Alloc | 2nd[0]
5456 GetSize() +-------+
5457 
5458 */
5459 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5460 {
5461  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5462 public:
5463  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5464  virtual ~VmaBlockMetadata_Linear();
5465  virtual void Init(VkDeviceSize size);
5466 
5467  virtual bool Validate() const;
5468  virtual size_t GetAllocationCount() const;
5469  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5470  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5471  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5472 
5473  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5474  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5475 
5476 #if VMA_STATS_STRING_ENABLED
5477  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5478 #endif
5479 
5480  virtual bool CreateAllocationRequest(
5481  uint32_t currentFrameIndex,
5482  uint32_t frameInUseCount,
5483  VkDeviceSize bufferImageGranularity,
5484  VkDeviceSize allocSize,
5485  VkDeviceSize allocAlignment,
5486  bool upperAddress,
5487  VmaSuballocationType allocType,
5488  bool canMakeOtherLost,
5489  uint32_t strategy,
5490  VmaAllocationRequest* pAllocationRequest);
5491 
5492  virtual bool MakeRequestedAllocationsLost(
5493  uint32_t currentFrameIndex,
5494  uint32_t frameInUseCount,
5495  VmaAllocationRequest* pAllocationRequest);
5496 
5497  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5498 
5499  virtual VkResult CheckCorruption(const void* pBlockData);
5500 
5501  virtual void Alloc(
5502  const VmaAllocationRequest& request,
5503  VmaSuballocationType type,
5504  VkDeviceSize allocSize,
5505  bool upperAddress,
5506  VmaAllocation hAllocation);
5507 
5508  virtual void Free(const VmaAllocation allocation);
5509  virtual void FreeAtOffset(VkDeviceSize offset);
5510 
5511 private:
5512  /*
5513  There are two suballocation vectors, used in a ping-pong fashion.
5514  The one with index m_1stVectorIndex is called 1st.
5515  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5516  2nd can be non-empty only when 1st is not empty.
5517  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5518  */
5519  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5520 
5521  enum SECOND_VECTOR_MODE
5522  {
5523  SECOND_VECTOR_EMPTY,
5524  /*
5525  Suballocations in 2nd vector are created later than the ones in 1st, but they
5526  all have smaller offsets.
5527  */
5528  SECOND_VECTOR_RING_BUFFER,
5529  /*
5530  Suballocations in 2nd vector are the upper side of a double stack.
5531  They all have offsets higher than those in 1st vector.
5532  The top of this stack means smaller offsets, but higher indices in this vector.
5533  */
5534  SECOND_VECTOR_DOUBLE_STACK,
5535  };
5536 
5537  VkDeviceSize m_SumFreeSize;
5538  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5539  uint32_t m_1stVectorIndex;
5540  SECOND_VECTOR_MODE m_2ndVectorMode;
5541 
5542  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5543  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5544  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5545  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5546 
5547  // Number of items in 1st vector with hAllocation = null at the beginning.
5548  size_t m_1stNullItemsBeginCount;
5549  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5550  size_t m_1stNullItemsMiddleCount;
5551  // Number of items in 2nd vector with hAllocation = null.
5552  size_t m_2ndNullItemsCount;
5553 
5554  bool ShouldCompact1st() const;
5555  void CleanupAfterFree();
5556 };
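#if 0
// A minimal sketch (illustration only) of the ping-pong access pattern
// described above: flipping m_1stVectorIndex with XOR exchanges which of the
// two vectors plays the role of "1st" and which plays "2nd"; no elements move.
static void PingPongFlipSketch(uint32_t& firstVectorIndex)
{
    // Before: 1st == vectors[firstVectorIndex], 2nd == vectors[firstVectorIndex ^ 1].
    firstVectorIndex ^= 1;
    // After: the roles are swapped in O(1).
}
#endif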
5557 
5558 /*
5559 - GetSize() is the original size of allocated memory block.
5560 - m_UsableSize is this size aligned down to a power of two.
5561  All allocations and calculations happen relative to m_UsableSize.
5562 - GetUnusableSize() is the difference between them.
5563  It is reported as a separate, unused range, not available for allocations.
5564 
5565 Node at level 0 has size = m_UsableSize.
5566 Each next level contains nodes with size 2 times smaller than current level.
5567 m_LevelCount is the maximum number of levels to use in the current object.
5568 */
5569 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5570 {
5571  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5572 public:
5573  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5574  virtual ~VmaBlockMetadata_Buddy();
5575  virtual void Init(VkDeviceSize size);
5576 
5577  virtual bool Validate() const;
5578  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5579  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5580  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5581  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5582 
5583  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5584  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5585 
5586 #if VMA_STATS_STRING_ENABLED
5587  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5588 #endif
5589 
5590  virtual bool CreateAllocationRequest(
5591  uint32_t currentFrameIndex,
5592  uint32_t frameInUseCount,
5593  VkDeviceSize bufferImageGranularity,
5594  VkDeviceSize allocSize,
5595  VkDeviceSize allocAlignment,
5596  bool upperAddress,
5597  VmaSuballocationType allocType,
5598  bool canMakeOtherLost,
5599  uint32_t strategy,
5600  VmaAllocationRequest* pAllocationRequest);
5601 
5602  virtual bool MakeRequestedAllocationsLost(
5603  uint32_t currentFrameIndex,
5604  uint32_t frameInUseCount,
5605  VmaAllocationRequest* pAllocationRequest);
5606 
5607  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5608 
5609  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5610 
5611  virtual void Alloc(
5612  const VmaAllocationRequest& request,
5613  VmaSuballocationType type,
5614  VkDeviceSize allocSize,
5615  bool upperAddress,
5616  VmaAllocation hAllocation);
5617 
5618  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5619  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5620 
5621 private:
5622  static const VkDeviceSize MIN_NODE_SIZE = 32;
5623  static const size_t MAX_LEVELS = 30;
5624 
5625  struct ValidationContext
5626  {
5627  size_t calculatedAllocationCount;
5628  size_t calculatedFreeCount;
5629  VkDeviceSize calculatedSumFreeSize;
5630 
5631  ValidationContext() :
5632  calculatedAllocationCount(0),
5633  calculatedFreeCount(0),
5634  calculatedSumFreeSize(0) { }
5635  };
5636 
5637  struct Node
5638  {
5639  VkDeviceSize offset;
5640  enum TYPE
5641  {
5642  TYPE_FREE,
5643  TYPE_ALLOCATION,
5644  TYPE_SPLIT,
5645  TYPE_COUNT
5646  } type;
5647  Node* parent;
5648  Node* buddy;
5649 
5650  union
5651  {
5652  struct
5653  {
5654  Node* prev;
5655  Node* next;
5656  } free;
5657  struct
5658  {
5659  VmaAllocation alloc;
5660  } allocation;
5661  struct
5662  {
5663  Node* leftChild;
5664  } split;
5665  };
5666  };
5667 
5668  // Size of the memory block aligned down to a power of two.
5669  VkDeviceSize m_UsableSize;
5670  uint32_t m_LevelCount;
5671 
5672  Node* m_Root;
5673  struct {
5674  Node* front;
5675  Node* back;
5676  } m_FreeList[MAX_LEVELS];
5677  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5678  size_t m_AllocationCount;
5679  // Number of nodes in the tree with type == TYPE_FREE.
5680  size_t m_FreeCount;
5681  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5682  VkDeviceSize m_SumFreeSize;
5683 
5684  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5685  void DeleteNode(Node* node);
5686  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5687  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5688  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5689  // Alloc passed just for validation. Can be null.
5690  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5691  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5692  // Adds node to the front of FreeList at given level.
5693  // node->type must be FREE.
5694  // node->free.prev, next can be undefined.
5695  void AddToFreeListFront(uint32_t level, Node* node);
5696  // Removes node from FreeList at given level.
5697  // node->type must be FREE.
5698  // node->free.prev, next stay untouched.
5699  void RemoveFromFreeList(uint32_t level, Node* node);
5700 
5701 #if VMA_STATS_STRING_ENABLED
5702  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5703 #endif
5704 };
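#if 0
// A worked example (illustration only) of the buddy metadata size math
// described above. For a hypothetical 10 MiB block, m_UsableSize is aligned
// down to 8 MiB; the remaining 2 MiB is the unusable size, reported as a
// separate unused range. Node sizes then halve at each level.
static void BuddyLevelMathSketch()
{
    const VkDeviceSize usableSize = 8ull * 1024 * 1024; // 10 MiB aligned down to 8 MiB.
    const VkDeviceSize level0 = usableSize >> 0; // 8 MiB - the root node.
    const VkDeviceSize level1 = usableSize >> 1; // 4 MiB per node.
    const VkDeviceSize level2 = usableSize >> 2; // 2 MiB per node.
    (void)level0; (void)level1; (void)level2;
}
#endif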
5705 
5706 /*
5707 Represents a single block of device memory (`VkDeviceMemory`) with all the
5708 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5709 
5710 Thread-safety: This class must be externally synchronized.
5711 */
5712 class VmaDeviceMemoryBlock
5713 {
5714  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5715 public:
5716  VmaBlockMetadata* m_pMetadata;
5717 
5718  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5719 
5720  ~VmaDeviceMemoryBlock()
5721  {
5722  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5723  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5724  }
5725 
5726  // Always call after construction.
5727  void Init(
5728  VmaAllocator hAllocator,
5729  uint32_t newMemoryTypeIndex,
5730  VkDeviceMemory newMemory,
5731  VkDeviceSize newSize,
5732  uint32_t id,
5733  uint32_t algorithm);
5734  // Always call before destruction.
5735  void Destroy(VmaAllocator allocator);
5736 
5737  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5738  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5739  uint32_t GetId() const { return m_Id; }
5740  void* GetMappedData() const { return m_pMappedData; }
5741 
5742  // Validates all data structures inside this object. If not valid, returns false.
5743  bool Validate() const;
5744 
5745  VkResult CheckCorruption(VmaAllocator hAllocator);
5746 
5747  // ppData can be null.
5748  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5749  void Unmap(VmaAllocator hAllocator, uint32_t count);
5750 
5751  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5752  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5753 
5754  VkResult BindBufferMemory(
5755  const VmaAllocator hAllocator,
5756  const VmaAllocation hAllocation,
5757  VkBuffer hBuffer);
5758  VkResult BindImageMemory(
5759  const VmaAllocator hAllocator,
5760  const VmaAllocation hAllocation,
5761  VkImage hImage);
5762 
5763 private:
5764  uint32_t m_MemoryTypeIndex;
5765  uint32_t m_Id;
5766  VkDeviceMemory m_hMemory;
5767 
5768  /*
5769  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5770  Also protects m_MapCount, m_pMappedData.
5771  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5772  */
5773  VMA_MUTEX m_Mutex;
5774  uint32_t m_MapCount;
5775  void* m_pMappedData;
5776 };
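#if 0
// A minimal sketch (illustration only) of the reference-counted Map()/Unmap()
// above; `count` lets a caller acquire or release several references at once.
// hAllocator and pBlock are assumed to come from the surrounding allocator code.
static VkResult MapTwiceSketch(VmaAllocator hAllocator, VmaDeviceMemoryBlock* pBlock)
{
    void* pData = VMA_NULL;
    VkResult res = pBlock->Map(hAllocator, 1, &pData); // First reference maps the memory.
    if(res != VK_SUCCESS)
    {
        return res;
    }
    res = pBlock->Map(hAllocator, 1, VMA_NULL); // Second reference; ppData can be null.
    pBlock->Unmap(hAllocator, 2); // Releasing the last reference unmaps.
    return res;
}
#endif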
5777 
5778 struct VmaPointerLess
5779 {
5780  bool operator()(const void* lhs, const void* rhs) const
5781  {
5782  return lhs < rhs;
5783  }
5784 };
5785 
5786 struct VmaDefragmentationMove
5787 {
5788  size_t srcBlockIndex;
5789  size_t dstBlockIndex;
5790  VkDeviceSize srcOffset;
5791  VkDeviceSize dstOffset;
5792  VkDeviceSize size;
5793 };
5794 
5795 class VmaDefragmentationAlgorithm;
5796 
5797 /*
5798 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5799 Vulkan memory type.
5800 
5801 Synchronized internally with a mutex.
5802 */
5803 struct VmaBlockVector
5804 {
5805  VMA_CLASS_NO_COPY(VmaBlockVector)
5806 public:
5807  VmaBlockVector(
5808  VmaAllocator hAllocator,
5809  uint32_t memoryTypeIndex,
5810  VkDeviceSize preferredBlockSize,
5811  size_t minBlockCount,
5812  size_t maxBlockCount,
5813  VkDeviceSize bufferImageGranularity,
5814  uint32_t frameInUseCount,
5815  bool isCustomPool,
5816  bool explicitBlockSize,
5817  uint32_t algorithm);
5818  ~VmaBlockVector();
5819 
5820  VkResult CreateMinBlocks();
5821 
5822  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5823  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5824  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5825  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5826  uint32_t GetAlgorithm() const { return m_Algorithm; }
5827 
5828  void GetPoolStats(VmaPoolStats* pStats);
5829 
5830  bool IsEmpty() const { return m_Blocks.empty(); }
5831  bool IsCorruptionDetectionEnabled() const;
5832 
5833  VkResult Allocate(
5834  VmaPool hCurrentPool,
5835  uint32_t currentFrameIndex,
5836  VkDeviceSize size,
5837  VkDeviceSize alignment,
5838  const VmaAllocationCreateInfo& createInfo,
5839  VmaSuballocationType suballocType,
5840  size_t allocationCount,
5841  VmaAllocation* pAllocations);
5842 
5843  void Free(
5844  VmaAllocation hAllocation);
5845 
5846  // Adds statistics of this BlockVector to pStats.
5847  void AddStats(VmaStats* pStats);
5848 
5849 #if VMA_STATS_STRING_ENABLED
5850  void PrintDetailedMap(class VmaJsonWriter& json);
5851 #endif
5852 
5853  void MakePoolAllocationsLost(
5854  uint32_t currentFrameIndex,
5855  size_t* pLostAllocationCount);
5856  VkResult CheckCorruption();
5857 
5858  // Saves results in pCtx->res.
5859  void Defragment(
5860  class VmaBlockVectorDefragmentationContext* pCtx,
5861  VmaDefragmentationStats* pStats,
5862  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5863  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5864  VkCommandBuffer commandBuffer);
5865  void DefragmentationEnd(
5866  class VmaBlockVectorDefragmentationContext* pCtx,
5867  VmaDefragmentationStats* pStats);
5868 
5869  ////////////////////////////////////////////////////////////////////////////////
5870  // To be used only while the m_Mutex is locked. Used during defragmentation.
5871 
5872  size_t GetBlockCount() const { return m_Blocks.size(); }
5873  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5874  size_t CalcAllocationCount() const;
5875  bool IsBufferImageGranularityConflictPossible() const;
5876 
5877 private:
5878  friend class VmaDefragmentationAlgorithm_Generic;
5879 
5880  const VmaAllocator m_hAllocator;
5881  const uint32_t m_MemoryTypeIndex;
5882  const VkDeviceSize m_PreferredBlockSize;
5883  const size_t m_MinBlockCount;
5884  const size_t m_MaxBlockCount;
5885  const VkDeviceSize m_BufferImageGranularity;
5886  const uint32_t m_FrameInUseCount;
5887  const bool m_IsCustomPool;
5888  const bool m_ExplicitBlockSize;
5889  const uint32_t m_Algorithm;
5890  /* There can be at most one block that is completely empty - a
5891  hysteresis to avoid the pessimistic case of alternating creation and destruction
5892  of a VkDeviceMemory. */
5893  bool m_HasEmptyBlock;
5894  VMA_RW_MUTEX m_Mutex;
5895  // Incrementally sorted by sumFreeSize, ascending.
5896  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5897  uint32_t m_NextBlockId;
5898 
5899  VkDeviceSize CalcMaxBlockSize() const;
5900 
5901  // Finds and removes given block from vector.
5902  void Remove(VmaDeviceMemoryBlock* pBlock);
5903 
5904  // Performs single step in sorting m_Blocks. They may not be fully sorted
5905  // after this call.
5906  void IncrementallySortBlocks();
5907 
5908  VkResult AllocatePage(
5909  VmaPool hCurrentPool,
5910  uint32_t currentFrameIndex,
5911  VkDeviceSize size,
5912  VkDeviceSize alignment,
5913  const VmaAllocationCreateInfo& createInfo,
5914  VmaSuballocationType suballocType,
5915  VmaAllocation* pAllocation);
5916 
5917  // To be used only without CAN_MAKE_OTHER_LOST flag.
5918  VkResult AllocateFromBlock(
5919  VmaDeviceMemoryBlock* pBlock,
5920  VmaPool hCurrentPool,
5921  uint32_t currentFrameIndex,
5922  VkDeviceSize size,
5923  VkDeviceSize alignment,
5924  VmaAllocationCreateFlags allocFlags,
5925  void* pUserData,
5926  VmaSuballocationType suballocType,
5927  uint32_t strategy,
5928  VmaAllocation* pAllocation);
5929 
5930  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5931 
5932  // Saves result to pCtx->res.
5933  void ApplyDefragmentationMovesCpu(
5934  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5935  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5936  // Saves result to pCtx->res.
5937  void ApplyDefragmentationMovesGpu(
5938  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5939  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5940  VkCommandBuffer commandBuffer);
5941 
5942  /*
5943  Used during defragmentation. pDefragmentationStats is optional. It's in/out
5944  - updated with new data.
5945  */
5946  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5947 };
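#if 0
// A minimal sketch (illustration only) of the idea behind
// IncrementallySortBlocks(): perform at most one neighbor swap per call, so
// m_Blocks drifts toward being sorted by sumFreeSize without paying for a
// full sort on the allocation hot path. Shown here over a plain array.
static void IncrementalSortStepSketch(VkDeviceSize* sumFreeSizes, size_t count)
{
    for(size_t i = 1; i < count; ++i)
    {
        if(sumFreeSizes[i - 1] > sumFreeSizes[i])
        {
            const VkDeviceSize tmp = sumFreeSizes[i - 1];
            sumFreeSizes[i - 1] = sumFreeSizes[i];
            sumFreeSizes[i] = tmp;
            return; // One step only; the next call continues the work.
        }
    }
}
#endif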
5948 
5949 struct VmaPool_T
5950 {
5951  VMA_CLASS_NO_COPY(VmaPool_T)
5952 public:
5953  VmaBlockVector m_BlockVector;
5954 
5955  VmaPool_T(
5956  VmaAllocator hAllocator,
5957  const VmaPoolCreateInfo& createInfo,
5958  VkDeviceSize preferredBlockSize);
5959  ~VmaPool_T();
5960 
5961  uint32_t GetId() const { return m_Id; }
5962  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5963 
5964 #if VMA_STATS_STRING_ENABLED
5965  //void PrintDetailedMap(class VmaStringBuilder& sb);
5966 #endif
5967 
5968 private:
5969  uint32_t m_Id;
5970 };
5971 
5972 /*
5973 Performs defragmentation:
5974 
5975 - Updates `pBlockVector->m_pMetadata`.
5976 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5977 - Does not move actual data, only returns requested moves as `moves`.
5978 */
5979 class VmaDefragmentationAlgorithm
5980 {
5981  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5982 public:
5983  VmaDefragmentationAlgorithm(
5984  VmaAllocator hAllocator,
5985  VmaBlockVector* pBlockVector,
5986  uint32_t currentFrameIndex) :
5987  m_hAllocator(hAllocator),
5988  m_pBlockVector(pBlockVector),
5989  m_CurrentFrameIndex(currentFrameIndex)
5990  {
5991  }
5992  virtual ~VmaDefragmentationAlgorithm()
5993  {
5994  }
5995 
5996  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5997  virtual void AddAll() = 0;
5998 
5999  virtual VkResult Defragment(
6000  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6001  VkDeviceSize maxBytesToMove,
6002  uint32_t maxAllocationsToMove) = 0;
6003 
6004  virtual VkDeviceSize GetBytesMoved() const = 0;
6005  virtual uint32_t GetAllocationsMoved() const = 0;
6006 
6007 protected:
6008  VmaAllocator const m_hAllocator;
6009  VmaBlockVector* const m_pBlockVector;
6010  const uint32_t m_CurrentFrameIndex;
6011 
6012  struct AllocationInfo
6013  {
6014  VmaAllocation m_hAllocation;
6015  VkBool32* m_pChanged;
6016 
6017  AllocationInfo() :
6018  m_hAllocation(VK_NULL_HANDLE),
6019  m_pChanged(VMA_NULL)
6020  {
6021  }
6022  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6023  m_hAllocation(hAlloc),
6024  m_pChanged(pChanged)
6025  {
6026  }
6027  };
6028 };
6029 
6030 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6031 {
6032  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6033 public:
6034  VmaDefragmentationAlgorithm_Generic(
6035  VmaAllocator hAllocator,
6036  VmaBlockVector* pBlockVector,
6037  uint32_t currentFrameIndex,
6038  bool overlappingMoveSupported);
6039  virtual ~VmaDefragmentationAlgorithm_Generic();
6040 
6041  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6042  virtual void AddAll() { m_AllAllocations = true; }
6043 
6044  virtual VkResult Defragment(
6045  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6046  VkDeviceSize maxBytesToMove,
6047  uint32_t maxAllocationsToMove);
6048 
6049  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6050  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6051 
6052 private:
6053  uint32_t m_AllocationCount;
6054  bool m_AllAllocations;
6055 
6056  VkDeviceSize m_BytesMoved;
6057  uint32_t m_AllocationsMoved;
6058 
6059  struct AllocationInfoSizeGreater
6060  {
6061  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6062  {
6063  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6064  }
6065  };
6066 
6067  struct AllocationInfoOffsetGreater
6068  {
6069  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6070  {
6071  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6072  }
6073  };
6074 
6075  struct BlockInfo
6076  {
6077  size_t m_OriginalBlockIndex;
6078  VmaDeviceMemoryBlock* m_pBlock;
6079  bool m_HasNonMovableAllocations;
6080  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6081 
6082  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6083  m_OriginalBlockIndex(SIZE_MAX),
6084  m_pBlock(VMA_NULL),
6085  m_HasNonMovableAllocations(true),
6086  m_Allocations(pAllocationCallbacks)
6087  {
6088  }
6089 
6090  void CalcHasNonMovableAllocations()
6091  {
6092  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6093  const size_t defragmentAllocCount = m_Allocations.size();
6094  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6095  }
6096 
6097  void SortAllocationsBySizeDescending()
6098  {
6099  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6100  }
6101 
6102  void SortAllocationsByOffsetDescending()
6103  {
6104  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6105  }
6106  };
6107 
6108  struct BlockPointerLess
6109  {
6110  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6111  {
6112  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6113  }
6114  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6115  {
6116  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6117  }
6118  };
6119 
6120  // 1. Blocks with some non-movable allocations go first.
6121  // 2. Blocks with smaller sumFreeSize go first.
6122  struct BlockInfoCompareMoveDestination
6123  {
6124  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6125  {
6126  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6127  {
6128  return true;
6129  }
6130  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6131  {
6132  return false;
6133  }
6134  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6135  {
6136  return true;
6137  }
6138  return false;
6139  }
6140  };
6141 
6142  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6143  BlockInfoVector m_Blocks;
6144 
6145  VkResult DefragmentRound(
6146  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6147  VkDeviceSize maxBytesToMove,
6148  uint32_t maxAllocationsToMove);
6149 
6150  size_t CalcBlocksWithNonMovableCount() const;
6151 
6152  static bool MoveMakesSense(
6153  size_t dstBlockIndex, VkDeviceSize dstOffset,
6154  size_t srcBlockIndex, VkDeviceSize srcOffset);
6155 };
6156 
6157 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6158 {
6159  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6160 public:
6161  VmaDefragmentationAlgorithm_Fast(
6162  VmaAllocator hAllocator,
6163  VmaBlockVector* pBlockVector,
6164  uint32_t currentFrameIndex,
6165  bool overlappingMoveSupported);
6166  virtual ~VmaDefragmentationAlgorithm_Fast();
6167 
6168  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6169  virtual void AddAll() { m_AllAllocations = true; }
6170 
6171  virtual VkResult Defragment(
6172  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6173  VkDeviceSize maxBytesToMove,
6174  uint32_t maxAllocationsToMove);
6175 
6176  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6177  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6178 
6179 private:
6180  struct BlockInfo
6181  {
6182  size_t origBlockIndex;
6183  };
6184 
6185  class FreeSpaceDatabase
6186  {
6187  public:
6188  FreeSpaceDatabase()
6189  {
6190  FreeSpace s = {};
6191  s.blockInfoIndex = SIZE_MAX;
6192  for(size_t i = 0; i < MAX_COUNT; ++i)
6193  {
6194  m_FreeSpaces[i] = s;
6195  }
6196  }
6197 
6198  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6199  {
6200  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6201  {
6202  return;
6203  }
6204 
6205  // Find first invalid or the smallest structure.
6206  size_t bestIndex = SIZE_MAX;
6207  for(size_t i = 0; i < MAX_COUNT; ++i)
6208  {
6209  // Empty structure.
6210  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6211  {
6212  bestIndex = i;
6213  break;
6214  }
6215  if(m_FreeSpaces[i].size < size &&
6216  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6217  {
6218  bestIndex = i;
6219  }
6220  }
6221 
6222  if(bestIndex != SIZE_MAX)
6223  {
6224  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6225  m_FreeSpaces[bestIndex].offset = offset;
6226  m_FreeSpaces[bestIndex].size = size;
6227  }
6228  }
6229 
6230  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6231  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6232  {
6233  size_t bestIndex = SIZE_MAX;
6234  VkDeviceSize bestFreeSpaceAfter = 0;
6235  for(size_t i = 0; i < MAX_COUNT; ++i)
6236  {
6237  // Structure is valid.
6238  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6239  {
6240  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6241  // Allocation fits into this structure.
6242  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6243  {
6244  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6245  (dstOffset + size);
6246  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6247  {
6248  bestIndex = i;
6249  bestFreeSpaceAfter = freeSpaceAfter;
6250  }
6251  }
6252  }
6253  }
6254 
6255  if(bestIndex != SIZE_MAX)
6256  {
6257  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6258  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6259 
6260  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6261  {
6262  // Leave this structure for remaining empty space.
6263  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6264  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6265  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6266  }
6267  else
6268  {
6269  // This structure becomes invalid.
6270  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6271  }
6272 
6273  return true;
6274  }
6275 
6276  return false;
6277  }
6278 
6279  private:
6280  static const size_t MAX_COUNT = 4;
6281 
6282  struct FreeSpace
6283  {
6284  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6285  VkDeviceSize offset;
6286  VkDeviceSize size;
6287  } m_FreeSpaces[MAX_COUNT];
6288  };
6289 
6290  const bool m_OverlappingMoveSupported;
6291 
6292  uint32_t m_AllocationCount;
6293  bool m_AllAllocations;
6294 
6295  VkDeviceSize m_BytesMoved;
6296  uint32_t m_AllocationsMoved;
6297 
6298  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6299 
6300  void PreprocessMetadata();
6301  void PostprocessMetadata();
6302  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6303 };
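#if 0
// A worked example (illustration only) of the fit test used by
// FreeSpaceDatabase::Fetch() above, with hypothetical numbers: a free range at
// offset 100 of size 64, and a request of size 48 with alignment 16.
static bool FreeSpaceFitSketch()
{
    const VkDeviceSize offset = 100, freeSize = 64;
    const VkDeviceSize size = 48, alignment = 16;
    const VkDeviceSize dstOffset = VmaAlignUp(offset, alignment); // 112
    // 112 + 48 = 160 <= 100 + 64 = 164, so it fits with 4 bytes to spare.
    return dstOffset + size <= offset + freeSize;
}
#endif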
6304 
6305 struct VmaBlockDefragmentationContext
6306 {
6307  enum BLOCK_FLAG
6308  {
6309  BLOCK_FLAG_USED = 0x00000001,
6310  };
6311  uint32_t flags;
6312  VkBuffer hBuffer;
6313 
6314  VmaBlockDefragmentationContext() :
6315  flags(0),
6316  hBuffer(VK_NULL_HANDLE)
6317  {
6318  }
6319 };
6320 
6321 class VmaBlockVectorDefragmentationContext
6322 {
6323  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6324 public:
6325  VkResult res;
6326  bool mutexLocked;
6327  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6328 
6329  VmaBlockVectorDefragmentationContext(
6330  VmaAllocator hAllocator,
6331  VmaPool hCustomPool, // Optional.
6332  VmaBlockVector* pBlockVector,
6333  uint32_t currFrameIndex,
6334  uint32_t flags);
6335  ~VmaBlockVectorDefragmentationContext();
6336 
6337  VmaPool GetCustomPool() const { return m_hCustomPool; }
6338  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6339  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6340 
6341  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6342  void AddAll() { m_AllAllocations = true; }
6343 
6344  void Begin(bool overlappingMoveSupported);
6345 
6346 private:
6347  const VmaAllocator m_hAllocator;
6348  // Null if not from custom pool.
6349  const VmaPool m_hCustomPool;
6350  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6351  VmaBlockVector* const m_pBlockVector;
6352  const uint32_t m_CurrFrameIndex;
6353  const uint32_t m_AlgorithmFlags;
6354  // Owner of this object.
6355  VmaDefragmentationAlgorithm* m_pAlgorithm;
6356 
6357  struct AllocInfo
6358  {
6359  VmaAllocation hAlloc;
6360  VkBool32* pChanged;
6361  };
6362  // Used between constructor and Begin.
6363  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6364  bool m_AllAllocations;
6365 };
6366 
6367 struct VmaDefragmentationContext_T
6368 {
6369 private:
6370  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6371 public:
6372  VmaDefragmentationContext_T(
6373  VmaAllocator hAllocator,
6374  uint32_t currFrameIndex,
6375  uint32_t flags,
6376  VmaDefragmentationStats* pStats);
6377  ~VmaDefragmentationContext_T();
6378 
6379  void AddPools(uint32_t poolCount, VmaPool* pPools);
6380  void AddAllocations(
6381  uint32_t allocationCount,
6382  VmaAllocation* pAllocations,
6383  VkBool32* pAllocationsChanged);
6384 
6385  /*
6386  Returns:
6387  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6388  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6389  - Negative value if an error occurred and the object can be destroyed immediately.
6390  */
6391  VkResult Defragment(
6392  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6393  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6394  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6395 
6396 private:
6397  const VmaAllocator m_hAllocator;
6398  const uint32_t m_CurrFrameIndex;
6399  const uint32_t m_Flags;
6400  VmaDefragmentationStats* const m_pStats;
6401  // Owner of these objects.
6402  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6403  // Owner of these objects.
6404  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6405 };
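#if 0
// A minimal sketch (illustration only) of how a caller is expected to
// interpret Defragment()'s result, per the comment above.
static void HandleDefragmentResultSketch(VkResult res)
{
    if(res == VK_SUCCESS)
    {
        // Finished; the context can be destroyed immediately.
    }
    else if(res == VK_NOT_READY)
    {
        // Succeeded so far; keep the context alive until vmaDefragmentationEnd().
    }
    else
    {
        // Negative value: an error occurred; the context can be destroyed immediately.
    }
}
#endif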
6406 
6407 #if VMA_RECORDING_ENABLED
6408 
6409 class VmaRecorder
6410 {
6411 public:
6412  VmaRecorder();
6413  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6414  void WriteConfiguration(
6415  const VkPhysicalDeviceProperties& devProps,
6416  const VkPhysicalDeviceMemoryProperties& memProps,
6417  bool dedicatedAllocationExtensionEnabled);
6418  ~VmaRecorder();
6419 
6420  void RecordCreateAllocator(uint32_t frameIndex);
6421  void RecordDestroyAllocator(uint32_t frameIndex);
6422  void RecordCreatePool(uint32_t frameIndex,
6423  const VmaPoolCreateInfo& createInfo,
6424  VmaPool pool);
6425  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6426  void RecordAllocateMemory(uint32_t frameIndex,
6427  const VkMemoryRequirements& vkMemReq,
6428  const VmaAllocationCreateInfo& createInfo,
6429  VmaAllocation allocation);
6430  void RecordAllocateMemoryPages(uint32_t frameIndex,
6431  const VkMemoryRequirements& vkMemReq,
6432  const VmaAllocationCreateInfo& createInfo,
6433  uint64_t allocationCount,
6434  const VmaAllocation* pAllocations);
6435  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6436  const VkMemoryRequirements& vkMemReq,
6437  bool requiresDedicatedAllocation,
6438  bool prefersDedicatedAllocation,
6439  const VmaAllocationCreateInfo& createInfo,
6440  VmaAllocation allocation);
6441  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6442  const VkMemoryRequirements& vkMemReq,
6443  bool requiresDedicatedAllocation,
6444  bool prefersDedicatedAllocation,
6445  const VmaAllocationCreateInfo& createInfo,
6446  VmaAllocation allocation);
6447  void RecordFreeMemory(uint32_t frameIndex,
6448  VmaAllocation allocation);
6449  void RecordFreeMemoryPages(uint32_t frameIndex,
6450  uint64_t allocationCount,
6451  const VmaAllocation* pAllocations);
6452  void RecordResizeAllocation(
6453  uint32_t frameIndex,
6454  VmaAllocation allocation,
6455  VkDeviceSize newSize);
6456  void RecordSetAllocationUserData(uint32_t frameIndex,
6457  VmaAllocation allocation,
6458  const void* pUserData);
6459  void RecordCreateLostAllocation(uint32_t frameIndex,
6460  VmaAllocation allocation);
6461  void RecordMapMemory(uint32_t frameIndex,
6462  VmaAllocation allocation);
6463  void RecordUnmapMemory(uint32_t frameIndex,
6464  VmaAllocation allocation);
6465  void RecordFlushAllocation(uint32_t frameIndex,
6466  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6467  void RecordInvalidateAllocation(uint32_t frameIndex,
6468  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6469  void RecordCreateBuffer(uint32_t frameIndex,
6470  const VkBufferCreateInfo& bufCreateInfo,
6471  const VmaAllocationCreateInfo& allocCreateInfo,
6472  VmaAllocation allocation);
6473  void RecordCreateImage(uint32_t frameIndex,
6474  const VkImageCreateInfo& imageCreateInfo,
6475  const VmaAllocationCreateInfo& allocCreateInfo,
6476  VmaAllocation allocation);
6477  void RecordDestroyBuffer(uint32_t frameIndex,
6478  VmaAllocation allocation);
6479  void RecordDestroyImage(uint32_t frameIndex,
6480  VmaAllocation allocation);
6481  void RecordTouchAllocation(uint32_t frameIndex,
6482  VmaAllocation allocation);
6483  void RecordGetAllocationInfo(uint32_t frameIndex,
6484  VmaAllocation allocation);
6485  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6486  VmaPool pool);
6487  void RecordDefragmentationBegin(uint32_t frameIndex,
6488  const VmaDefragmentationInfo2& info,
6489  VmaDefragmentationContext ctx);
6490  void RecordDefragmentationEnd(uint32_t frameIndex,
6491  VmaDefragmentationContext ctx);
6492 
6493 private:
6494  struct CallParams
6495  {
6496  uint32_t threadId;
6497  double time;
6498  };
6499 
6500  class UserDataString
6501  {
6502  public:
6503  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6504  const char* GetString() const { return m_Str; }
6505 
6506  private:
6507  char m_PtrStr[17];
6508  const char* m_Str;
6509  };
6510 
6511  bool m_UseMutex;
6512  VmaRecordFlags m_Flags;
6513  FILE* m_File;
6514  VMA_MUTEX m_FileMutex;
6515  int64_t m_Freq;
6516  int64_t m_StartCounter;
6517 
6518  void GetBasicParams(CallParams& outParams);
6519 
6520  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6521  template<typename T>
6522  void PrintPointerList(uint64_t count, const T* pItems)
6523  {
6524  if(count)
6525  {
6526  fprintf(m_File, "%p", pItems[0]);
6527  for(uint64_t i = 1; i < count; ++i)
6528  {
6529  fprintf(m_File, " %p", pItems[i]);
6530  }
6531  }
6532  }
6533 
6534  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6535  void Flush();
6536 };
6537 
6538 #endif // #if VMA_RECORDING_ENABLED
6539 
6540 // Main allocator object.
6541 struct VmaAllocator_T
6542 {
6543  VMA_CLASS_NO_COPY(VmaAllocator_T)
6544 public:
6545  bool m_UseMutex;
6546  bool m_UseKhrDedicatedAllocation;
6547  VkDevice m_hDevice;
6548  bool m_AllocationCallbacksSpecified;
6549  VkAllocationCallbacks m_AllocationCallbacks;
6550  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6551 
6552  // Number of bytes still available out of the user-defined limit for each heap, or VK_WHOLE_SIZE if that heap has no limit.
6553  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6554  VMA_MUTEX m_HeapSizeLimitMutex;
6555 
6556  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6557  VkPhysicalDeviceMemoryProperties m_MemProps;
6558 
6559  // Default pools.
6560  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6561 
6562  // Each vector is sorted by memory (handle value).
6563  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6564  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6565  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6566 
6567  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6568  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6569  ~VmaAllocator_T();
6570 
6571  const VkAllocationCallbacks* GetAllocationCallbacks() const
6572  {
6573  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6574  }
6575  const VmaVulkanFunctions& GetVulkanFunctions() const
6576  {
6577  return m_VulkanFunctions;
6578  }
6579 
6580  VkDeviceSize GetBufferImageGranularity() const
6581  {
6582  return VMA_MAX(
6583  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6584  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6585  }
6586 
6587  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6588  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6589 
6590  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6591  {
6592  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6593  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6594  }
6595  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6596  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6597  {
6598  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6599  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6600  }
6601  // Minimum alignment for all allocations in specific memory type.
6602  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6603  {
6604  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6605  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6606  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6607  }
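 /*
 Worked example of the rule above (illustrative numbers): with a hypothetical
 nonCoherentAtomSize of 64 and VMA_DEBUG_ALIGNMENT of 1, every allocation in a
 HOST_VISIBLE but non-HOST_COHERENT memory type is aligned to 64. Ranges passed
 to vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges must be
 multiples of the atom size, so this up-front alignment keeps a rounded-out
 flush from spilling into a neighboring allocation.
 */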
6608 
6609  bool IsIntegratedGpu() const
6610  {
6611  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6612  }
6613 
6614 #if VMA_RECORDING_ENABLED
6615  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6616 #endif
6617 
6618  void GetBufferMemoryRequirements(
6619  VkBuffer hBuffer,
6620  VkMemoryRequirements& memReq,
6621  bool& requiresDedicatedAllocation,
6622  bool& prefersDedicatedAllocation) const;
6623  void GetImageMemoryRequirements(
6624  VkImage hImage,
6625  VkMemoryRequirements& memReq,
6626  bool& requiresDedicatedAllocation,
6627  bool& prefersDedicatedAllocation) const;
6628 
6629  // Main allocation function.
6630  VkResult AllocateMemory(
6631  const VkMemoryRequirements& vkMemReq,
6632  bool requiresDedicatedAllocation,
6633  bool prefersDedicatedAllocation,
6634  VkBuffer dedicatedBuffer,
6635  VkImage dedicatedImage,
6636  const VmaAllocationCreateInfo& createInfo,
6637  VmaSuballocationType suballocType,
6638  size_t allocationCount,
6639  VmaAllocation* pAllocations);
6640 
6641  // Main deallocation function.
6642  void FreeMemory(
6643  size_t allocationCount,
6644  const VmaAllocation* pAllocations);
6645 
6646  VkResult ResizeAllocation(
6647  const VmaAllocation alloc,
6648  VkDeviceSize newSize);
6649 
6650  void CalculateStats(VmaStats* pStats);
6651 
6652 #if VMA_STATS_STRING_ENABLED
6653  void PrintDetailedMap(class VmaJsonWriter& json);
6654 #endif
6655 
6656  VkResult DefragmentationBegin(
6657  const VmaDefragmentationInfo2& info,
6658  VmaDefragmentationStats* pStats,
6659  VmaDefragmentationContext* pContext);
6660  VkResult DefragmentationEnd(
6661  VmaDefragmentationContext context);
6662 
6663  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6664  bool TouchAllocation(VmaAllocation hAllocation);
6665 
6666  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6667  void DestroyPool(VmaPool pool);
6668  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6669 
6670  void SetCurrentFrameIndex(uint32_t frameIndex);
6671  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6672 
6673  void MakePoolAllocationsLost(
6674  VmaPool hPool,
6675  size_t* pLostAllocationCount);
6676  VkResult CheckPoolCorruption(VmaPool hPool);
6677  VkResult CheckCorruption(uint32_t memoryTypeBits);
6678 
6679  void CreateLostAllocation(VmaAllocation* pAllocation);
6680 
6681  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6682  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6683 
6684  VkResult Map(VmaAllocation hAllocation, void** ppData);
6685  void Unmap(VmaAllocation hAllocation);
6686 
6687  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6688  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6689 
6690  void FlushOrInvalidateAllocation(
6691  VmaAllocation hAllocation,
6692  VkDeviceSize offset, VkDeviceSize size,
6693  VMA_CACHE_OPERATION op);
6694 
6695  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6696 
6697 private:
6698  VkDeviceSize m_PreferredLargeHeapBlockSize;
6699 
6700  VkPhysicalDevice m_PhysicalDevice;
6701  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6702 
6703  VMA_RW_MUTEX m_PoolsMutex;
6704  // Protected by m_PoolsMutex. Sorted by pointer value.
6705  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6706  uint32_t m_NextPoolId;
6707 
6708  VmaVulkanFunctions m_VulkanFunctions;
6709 
6710 #if VMA_RECORDING_ENABLED
6711  VmaRecorder* m_pRecorder;
6712 #endif
6713 
6714  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6715 
6716  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6717 
6718  VkResult AllocateMemoryOfType(
6719  VkDeviceSize size,
6720  VkDeviceSize alignment,
6721  bool dedicatedAllocation,
6722  VkBuffer dedicatedBuffer,
6723  VkImage dedicatedImage,
6724  const VmaAllocationCreateInfo& createInfo,
6725  uint32_t memTypeIndex,
6726  VmaSuballocationType suballocType,
6727  size_t allocationCount,
6728  VmaAllocation* pAllocations);
6729 
6730  // Helper function only to be used inside AllocateDedicatedMemory.
6731  VkResult AllocateDedicatedMemoryPage(
6732  VkDeviceSize size,
6733  VmaSuballocationType suballocType,
6734  uint32_t memTypeIndex,
6735  const VkMemoryAllocateInfo& allocInfo,
6736  bool map,
6737  bool isUserDataString,
6738  void* pUserData,
6739  VmaAllocation* pAllocation);
6740 
6741  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6742  VkResult AllocateDedicatedMemory(
6743  VkDeviceSize size,
6744  VmaSuballocationType suballocType,
6745  uint32_t memTypeIndex,
6746  bool map,
6747  bool isUserDataString,
6748  void* pUserData,
6749  VkBuffer dedicatedBuffer,
6750  VkImage dedicatedImage,
6751  size_t allocationCount,
6752  VmaAllocation* pAllocations);
6753 
6754  // Frees the given allocation as dedicated memory: unregisters it and releases its VkDeviceMemory.
6755  void FreeDedicatedMemory(VmaAllocation allocation);
6756 };
6757 
6758 ////////////////////////////////////////////////////////////////////////////////
6759 // Memory allocation #2 after VmaAllocator_T definition
6760 
6761 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6762 {
6763  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6764 }
6765 
6766 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6767 {
6768  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6769 }
6770 
6771 template<typename T>
6772 static T* VmaAllocate(VmaAllocator hAllocator)
6773 {
6774  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6775 }
6776 
6777 template<typename T>
6778 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6779 {
6780  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6781 }
6782 
6783 template<typename T>
6784 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6785 {
6786  if(ptr != VMA_NULL)
6787  {
6788  ptr->~T();
6789  VmaFree(hAllocator, ptr);
6790  }
6791 }
6792 
6793 template<typename T>
6794 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6795 {
6796  if(ptr != VMA_NULL)
6797  {
6798  for(size_t i = count; i--; )
6799  ptr[i].~T();
6800  VmaFree(hAllocator, ptr);
6801  }
6802 }
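/*
Destroy-side counterparts of the vma_new / vma_new_array macros defined
earlier in this file (VmaAllocate/VmaAllocateArray plus placement new).
Illustrative pairing; MyType is a hypothetical type:

    MyType* p = vma_new(hAllocator, MyType);   // allocate + construct
    vma_delete(hAllocator, p);                 // p->~MyType() + VmaFree
*/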
6803 
6804 ////////////////////////////////////////////////////////////////////////////////
6805 // VmaStringBuilder
6806 
6807 #if VMA_STATS_STRING_ENABLED
6808 
6809 class VmaStringBuilder
6810 {
6811 public:
6812  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6813  size_t GetLength() const { return m_Data.size(); }
6814  const char* GetData() const { return m_Data.data(); }
6815 
6816  void Add(char ch) { m_Data.push_back(ch); }
6817  void Add(const char* pStr);
6818  void AddNewLine() { Add('\n'); }
6819  void AddNumber(uint32_t num);
6820  void AddNumber(uint64_t num);
6821  void AddPointer(const void* ptr);
6822 
6823 private:
6824  VmaVector< char, VmaStlAllocator<char> > m_Data;
6825 };
6826 
6827 void VmaStringBuilder::Add(const char* pStr)
6828 {
6829  const size_t strLen = strlen(pStr);
6830  if(strLen > 0)
6831  {
6832  const size_t oldCount = m_Data.size();
6833  m_Data.resize(oldCount + strLen);
6834  memcpy(m_Data.data() + oldCount, pStr, strLen);
6835  }
6836 }
6837 
6838 void VmaStringBuilder::AddNumber(uint32_t num)
6839 {
6840  char buf[11];
6841  VmaUint32ToStr(buf, sizeof(buf), num);
6842  Add(buf);
6843 }
6844 
6845 void VmaStringBuilder::AddNumber(uint64_t num)
6846 {
6847  char buf[21];
6848  VmaUint64ToStr(buf, sizeof(buf), num);
6849  Add(buf);
6850 }
6851 
6852 void VmaStringBuilder::AddPointer(const void* ptr)
6853 {
6854  char buf[21];
6855  VmaPtrToStr(buf, sizeof(buf), ptr);
6856  Add(buf);
6857 }
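/*
Illustrative use (sketch): the builder appends into a growable VmaVector and
exposes the raw buffer, which is NOT null-terminated, so GetData() must always
be paired with GetLength():

    VmaStringBuilder sb(hAllocator);
    sb.Add("heap ");
    sb.AddNumber(2u);
    sb.AddNewLine();
    fwrite(sb.GetData(), 1, sb.GetLength(), stdout);
*/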
6858 
6859 #endif // #if VMA_STATS_STRING_ENABLED
6860 
6861 ////////////////////////////////////////////////////////////////////////////////
6862 // VmaJsonWriter
6863 
6864 #if VMA_STATS_STRING_ENABLED
6865 
6866 class VmaJsonWriter
6867 {
6868  VMA_CLASS_NO_COPY(VmaJsonWriter)
6869 public:
6870  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6871  ~VmaJsonWriter();
6872 
6873  void BeginObject(bool singleLine = false);
6874  void EndObject();
6875 
6876  void BeginArray(bool singleLine = false);
6877  void EndArray();
6878 
6879  void WriteString(const char* pStr);
6880  void BeginString(const char* pStr = VMA_NULL);
6881  void ContinueString(const char* pStr);
6882  void ContinueString(uint32_t n);
6883  void ContinueString(uint64_t n);
6884  void ContinueString_Pointer(const void* ptr);
6885  void EndString(const char* pStr = VMA_NULL);
6886 
6887  void WriteNumber(uint32_t n);
6888  void WriteNumber(uint64_t n);
6889  void WriteBool(bool b);
6890  void WriteNull();
6891 
6892 private:
6893  static const char* const INDENT;
6894 
6895  enum COLLECTION_TYPE
6896  {
6897  COLLECTION_TYPE_OBJECT,
6898  COLLECTION_TYPE_ARRAY,
6899  };
6900  struct StackItem
6901  {
6902  COLLECTION_TYPE type;
6903  uint32_t valueCount;
6904  bool singleLineMode;
6905  };
6906 
6907  VmaStringBuilder& m_SB;
6908  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6909  bool m_InsideString;
6910 
6911  void BeginValue(bool isString);
6912  void WriteIndent(bool oneLess = false);
6913 };
6914 
6915 const char* const VmaJsonWriter::INDENT = " ";
6916 
6917 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6918  m_SB(sb),
6919  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6920  m_InsideString(false)
6921 {
6922 }
6923 
6924 VmaJsonWriter::~VmaJsonWriter()
6925 {
6926  VMA_ASSERT(!m_InsideString);
6927  VMA_ASSERT(m_Stack.empty());
6928 }
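/*
Usage sketch: inside an object, calls must alternate string key / value;
BeginValue() below asserts this by checking valueCount parity. Given a
VmaStringBuilder sb, this emits {"A": 1, "B": true} with newlines and
indentation:

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("A");
    json.WriteNumber(1u);
    json.WriteString("B");
    json.WriteBool(true);
    json.EndObject();
*/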
6929 
6930 void VmaJsonWriter::BeginObject(bool singleLine)
6931 {
6932  VMA_ASSERT(!m_InsideString);
6933 
6934  BeginValue(false);
6935  m_SB.Add('{');
6936 
6937  StackItem item;
6938  item.type = COLLECTION_TYPE_OBJECT;
6939  item.valueCount = 0;
6940  item.singleLineMode = singleLine;
6941  m_Stack.push_back(item);
6942 }
6943 
6944 void VmaJsonWriter::EndObject()
6945 {
6946  VMA_ASSERT(!m_InsideString);
6947 
6948  WriteIndent(true);
6949  m_SB.Add('}');
6950 
6951  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6952  m_Stack.pop_back();
6953 }
6954 
6955 void VmaJsonWriter::BeginArray(bool singleLine)
6956 {
6957  VMA_ASSERT(!m_InsideString);
6958 
6959  BeginValue(false);
6960  m_SB.Add('[');
6961 
6962  StackItem item;
6963  item.type = COLLECTION_TYPE_ARRAY;
6964  item.valueCount = 0;
6965  item.singleLineMode = singleLine;
6966  m_Stack.push_back(item);
6967 }
6968 
6969 void VmaJsonWriter::EndArray()
6970 {
6971  VMA_ASSERT(!m_InsideString);
6972 
6973  WriteIndent(true);
6974  m_SB.Add(']');
6975 
6976  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6977  m_Stack.pop_back();
6978 }
6979 
6980 void VmaJsonWriter::WriteString(const char* pStr)
6981 {
6982  BeginString(pStr);
6983  EndString();
6984 }
6985 
6986 void VmaJsonWriter::BeginString(const char* pStr)
6987 {
6988  VMA_ASSERT(!m_InsideString);
6989 
6990  BeginValue(true);
6991  m_SB.Add('"');
6992  m_InsideString = true;
6993  if(pStr != VMA_NULL && pStr[0] != '\0')
6994  {
6995  ContinueString(pStr);
6996  }
6997 }
6998 
6999 void VmaJsonWriter::ContinueString(const char* pStr)
7000 {
7001  VMA_ASSERT(m_InsideString);
7002 
7003  const size_t strLen = strlen(pStr);
7004  for(size_t i = 0; i < strLen; ++i)
7005  {
7006  char ch = pStr[i];
7007  if(ch == '\\')
7008  {
7009  m_SB.Add("\\\\");
7010  }
7011  else if(ch == '"')
7012  {
7013  m_SB.Add("\\\"");
7014  }
7015  else if(ch >= 32)
7016  {
7017  m_SB.Add(ch);
7018  }
7019  else switch(ch)
7020  {
7021  case '\b':
7022  m_SB.Add("\\b");
7023  break;
7024  case '\f':
7025  m_SB.Add("\\f");
7026  break;
7027  case '\n':
7028  m_SB.Add("\\n");
7029  break;
7030  case '\r':
7031  m_SB.Add("\\r");
7032  break;
7033  case '\t':
7034  m_SB.Add("\\t");
7035  break;
7036  default:
7037  VMA_ASSERT(0 && "Character not currently supported.");
7038  break;
7039  }
7040  }
7041 }
7042 
7043 void VmaJsonWriter::ContinueString(uint32_t n)
7044 {
7045  VMA_ASSERT(m_InsideString);
7046  m_SB.AddNumber(n);
7047 }
7048 
7049 void VmaJsonWriter::ContinueString(uint64_t n)
7050 {
7051  VMA_ASSERT(m_InsideString);
7052  m_SB.AddNumber(n);
7053 }
7054 
7055 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7056 {
7057  VMA_ASSERT(m_InsideString);
7058  m_SB.AddPointer(ptr);
7059 }
7060 
7061 void VmaJsonWriter::EndString(const char* pStr)
7062 {
7063  VMA_ASSERT(m_InsideString);
7064  if(pStr != VMA_NULL && pStr[0] != '\0')
7065  {
7066  ContinueString(pStr);
7067  }
7068  m_SB.Add('"');
7069  m_InsideString = false;
7070 }
7071 
7072 void VmaJsonWriter::WriteNumber(uint32_t n)
7073 {
7074  VMA_ASSERT(!m_InsideString);
7075  BeginValue(false);
7076  m_SB.AddNumber(n);
7077 }
7078 
7079 void VmaJsonWriter::WriteNumber(uint64_t n)
7080 {
7081  VMA_ASSERT(!m_InsideString);
7082  BeginValue(false);
7083  m_SB.AddNumber(n);
7084 }
7085 
7086 void VmaJsonWriter::WriteBool(bool b)
7087 {
7088  VMA_ASSERT(!m_InsideString);
7089  BeginValue(false);
7090  m_SB.Add(b ? "true" : "false");
7091 }
7092 
7093 void VmaJsonWriter::WriteNull()
7094 {
7095  VMA_ASSERT(!m_InsideString);
7096  BeginValue(false);
7097  m_SB.Add("null");
7098 }
7099 
7100 void VmaJsonWriter::BeginValue(bool isString)
7101 {
7102  if(!m_Stack.empty())
7103  {
7104  StackItem& currItem = m_Stack.back();
7105  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7106  currItem.valueCount % 2 == 0)
7107  {
7108  VMA_ASSERT(isString);
7109  }
7110 
7111  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7112  currItem.valueCount % 2 != 0)
7113  {
7114  m_SB.Add(": ");
7115  }
7116  else if(currItem.valueCount > 0)
7117  {
7118  m_SB.Add(", ");
7119  WriteIndent();
7120  }
7121  else
7122  {
7123  WriteIndent();
7124  }
7125  ++currItem.valueCount;
7126  }
7127 }
7128 
7129 void VmaJsonWriter::WriteIndent(bool oneLess)
7130 {
7131  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7132  {
7133  m_SB.AddNewLine();
7134 
7135  size_t count = m_Stack.size();
7136  if(count > 0 && oneLess)
7137  {
7138  --count;
7139  }
7140  for(size_t i = 0; i < count; ++i)
7141  {
7142  m_SB.Add(INDENT);
7143  }
7144  }
7145 }
7146 
7147 #endif // #if VMA_STATS_STRING_ENABLED
7148 
7149 ////////////////////////////////////////////////////////////////////////////////
7150 
7151 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7152 {
7153  if(IsUserDataString())
7154  {
7155  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7156 
7157  FreeUserDataString(hAllocator);
7158 
7159  if(pUserData != VMA_NULL)
7160  {
7161  const char* const newStrSrc = (char*)pUserData;
7162  const size_t newStrLen = strlen(newStrSrc);
7163  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7164  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7165  m_pUserData = newStrDst;
7166  }
7167  }
7168  else
7169  {
7170  m_pUserData = pUserData;
7171  }
7172 }
7173 
7174 void VmaAllocation_T::ChangeBlockAllocation(
7175  VmaAllocator hAllocator,
7176  VmaDeviceMemoryBlock* block,
7177  VkDeviceSize offset)
7178 {
7179  VMA_ASSERT(block != VMA_NULL);
7180  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7181 
7182  // Move mapping reference counter from old block to new block.
7183  if(block != m_BlockAllocation.m_Block)
7184  {
7185  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7186  if(IsPersistentMap())
7187  ++mapRefCount;
7188  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7189  block->Map(hAllocator, mapRefCount, VMA_NULL);
7190  }
7191 
7192  m_BlockAllocation.m_Block = block;
7193  m_BlockAllocation.m_Offset = offset;
7194 }
7195 
7196 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7197 {
7198  VMA_ASSERT(newSize > 0);
7199  m_Size = newSize;
7200 }
7201 
7202 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7203 {
7204  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7205  m_BlockAllocation.m_Offset = newOffset;
7206 }
7207 
7208 VkDeviceSize VmaAllocation_T::GetOffset() const
7209 {
7210  switch(m_Type)
7211  {
7212  case ALLOCATION_TYPE_BLOCK:
7213  return m_BlockAllocation.m_Offset;
7214  case ALLOCATION_TYPE_DEDICATED:
7215  return 0;
7216  default:
7217  VMA_ASSERT(0);
7218  return 0;
7219  }
7220 }
7221 
7222 VkDeviceMemory VmaAllocation_T::GetMemory() const
7223 {
7224  switch(m_Type)
7225  {
7226  case ALLOCATION_TYPE_BLOCK:
7227  return m_BlockAllocation.m_Block->GetDeviceMemory();
7228  case ALLOCATION_TYPE_DEDICATED:
7229  return m_DedicatedAllocation.m_hMemory;
7230  default:
7231  VMA_ASSERT(0);
7232  return VK_NULL_HANDLE;
7233  }
7234 }
7235 
7236 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7237 {
7238  switch(m_Type)
7239  {
7240  case ALLOCATION_TYPE_BLOCK:
7241  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7242  case ALLOCATION_TYPE_DEDICATED:
7243  return m_DedicatedAllocation.m_MemoryTypeIndex;
7244  default:
7245  VMA_ASSERT(0);
7246  return UINT32_MAX;
7247  }
7248 }
7249 
7250 void* VmaAllocation_T::GetMappedData() const
7251 {
7252  switch(m_Type)
7253  {
7254  case ALLOCATION_TYPE_BLOCK:
7255  if(m_MapCount != 0)
7256  {
7257  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7258  VMA_ASSERT(pBlockData != VMA_NULL);
7259  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7260  }
7261  else
7262  {
7263  return VMA_NULL;
7264  }
7265  break;
7266  case ALLOCATION_TYPE_DEDICATED:
7267  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7268  return m_DedicatedAllocation.m_pMappedData;
7269  default:
7270  VMA_ASSERT(0);
7271  return VMA_NULL;
7272  }
7273 }
7274 
7275 bool VmaAllocation_T::CanBecomeLost() const
7276 {
7277  switch(m_Type)
7278  {
7279  case ALLOCATION_TYPE_BLOCK:
7280  return m_BlockAllocation.m_CanBecomeLost;
7281  case ALLOCATION_TYPE_DEDICATED:
7282  return false;
7283  default:
7284  VMA_ASSERT(0);
7285  return false;
7286  }
7287 }
7288 
7289 VmaPool VmaAllocation_T::GetPool() const
7290 {
7291  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7292  return m_BlockAllocation.m_hPool;
7293 }
7294 
7295 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7296 {
7297  VMA_ASSERT(CanBecomeLost());
7298 
7299  /*
7300  Warning: This is a carefully designed algorithm.
7301  Do not modify unless you really know what you're doing :)
7302  */
7303  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7304  for(;;)
7305  {
7306  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7307  {
7308  VMA_ASSERT(0);
7309  return false;
7310  }
7311  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7312  {
7313  return false;
7314  }
7315  else // Last use time earlier than current time.
7316  {
7317  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7318  {
7319  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7320  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7321  return true;
7322  }
7323  }
7324  }
7325 }
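/*
The loop above is a compare-and-swap retry, assuming
CompareExchangeLastUseFrameIndex wraps std::atomic compare_exchange_weak and
stores the freshly observed value back into localLastUseFrameIndex on failure.
Condensed sketch:

    uint32_t expected = lastUse.load();
    for(;;)
    {
        if(expected + frameInUseCount >= currentFrameIndex)
            return false; // Still inside the in-use frame window.
        if(lastUse.compare_exchange_weak(expected, VMA_FRAME_INDEX_LOST))
            return true;  // Won the race - allocation is now LOST.
        // CAS failed: expected was refreshed; re-check against the new value.
    }
*/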
7326 
7327 #if VMA_STATS_STRING_ENABLED
7328 
7329 // Correspond to values of enum VmaSuballocationType.
7330 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7331  "FREE",
7332  "UNKNOWN",
7333  "BUFFER",
7334  "IMAGE_UNKNOWN",
7335  "IMAGE_LINEAR",
7336  "IMAGE_OPTIMAL",
7337 };
7338 
7339 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7340 {
7341  json.WriteString("Type");
7342  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7343 
7344  json.WriteString("Size");
7345  json.WriteNumber(m_Size);
7346 
7347  if(m_pUserData != VMA_NULL)
7348  {
7349  json.WriteString("UserData");
7350  if(IsUserDataString())
7351  {
7352  json.WriteString((const char*)m_pUserData);
7353  }
7354  else
7355  {
7356  json.BeginString();
7357  json.ContinueString_Pointer(m_pUserData);
7358  json.EndString();
7359  }
7360  }
7361 
7362  json.WriteString("CreationFrameIndex");
7363  json.WriteNumber(m_CreationFrameIndex);
7364 
7365  json.WriteString("LastUseFrameIndex");
7366  json.WriteNumber(GetLastUseFrameIndex());
7367 
7368  if(m_BufferImageUsage != 0)
7369  {
7370  json.WriteString("Usage");
7371  json.WriteNumber(m_BufferImageUsage);
7372  }
7373 }
7374 
7375 #endif
7376 
7377 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7378 {
7379  VMA_ASSERT(IsUserDataString());
7380  if(m_pUserData != VMA_NULL)
7381  {
7382  char* const oldStr = (char*)m_pUserData;
7383  const size_t oldStrLen = strlen(oldStr);
7384  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7385  m_pUserData = VMA_NULL;
7386  }
7387 }
7388 
7389 void VmaAllocation_T::BlockAllocMap()
7390 {
7391  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7392 
7393  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7394  {
7395  ++m_MapCount;
7396  }
7397  else
7398  {
7399  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7400  }
7401 }
7402 
7403 void VmaAllocation_T::BlockAllocUnmap()
7404 {
7405  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7406 
7407  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7408  {
7409  --m_MapCount;
7410  }
7411  else
7412  {
7413  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7414  }
7415 }
7416 
7417 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7418 {
7419  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7420 
7421  if(m_MapCount != 0)
7422  {
7423  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7424  {
7425  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7426  *ppData = m_DedicatedAllocation.m_pMappedData;
7427  ++m_MapCount;
7428  return VK_SUCCESS;
7429  }
7430  else
7431  {
7432  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7433  return VK_ERROR_MEMORY_MAP_FAILED;
7434  }
7435  }
7436  else
7437  {
7438  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7439  hAllocator->m_hDevice,
7440  m_DedicatedAllocation.m_hMemory,
7441  0, // offset
7442  VK_WHOLE_SIZE,
7443  0, // flags
7444  ppData);
7445  if(result == VK_SUCCESS)
7446  {
7447  m_DedicatedAllocation.m_pMappedData = *ppData;
7448  m_MapCount = 1;
7449  }
7450  return result;
7451  }
7452 }
7453 
7454 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7455 {
7456  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7457 
7458  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7459  {
7460  --m_MapCount;
7461  if(m_MapCount == 0)
7462  {
7463  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7464  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7465  hAllocator->m_hDevice,
7466  m_DedicatedAllocation.m_hMemory);
7467  }
7468  }
7469  else
7470  {
7471  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7472  }
7473 }
7474 
7475 #if VMA_STATS_STRING_ENABLED
7476 
7477 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7478 {
7479  json.BeginObject();
7480 
7481  json.WriteString("Blocks");
7482  json.WriteNumber(stat.blockCount);
7483 
7484  json.WriteString("Allocations");
7485  json.WriteNumber(stat.allocationCount);
7486 
7487  json.WriteString("UnusedRanges");
7488  json.WriteNumber(stat.unusedRangeCount);
7489 
7490  json.WriteString("UsedBytes");
7491  json.WriteNumber(stat.usedBytes);
7492 
7493  json.WriteString("UnusedBytes");
7494  json.WriteNumber(stat.unusedBytes);
7495 
7496  if(stat.allocationCount > 1)
7497  {
7498  json.WriteString("AllocationSize");
7499  json.BeginObject(true);
7500  json.WriteString("Min");
7501  json.WriteNumber(stat.allocationSizeMin);
7502  json.WriteString("Avg");
7503  json.WriteNumber(stat.allocationSizeAvg);
7504  json.WriteString("Max");
7505  json.WriteNumber(stat.allocationSizeMax);
7506  json.EndObject();
7507  }
7508 
7509  if(stat.unusedRangeCount > 1)
7510  {
7511  json.WriteString("UnusedRangeSize");
7512  json.BeginObject(true);
7513  json.WriteString("Min");
7514  json.WriteNumber(stat.unusedRangeSizeMin);
7515  json.WriteString("Avg");
7516  json.WriteNumber(stat.unusedRangeSizeAvg);
7517  json.WriteString("Max");
7518  json.WriteNumber(stat.unusedRangeSizeMax);
7519  json.EndObject();
7520  }
7521 
7522  json.EndObject();
7523 }
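/*
Shape of the JSON emitted above (illustrative values):

    {
      "Blocks": 1, "Allocations": 3, "UnusedRanges": 2,
      "UsedBytes": 1536, "UnusedBytes": 512,
      "AllocationSize": { "Min": 256, "Avg": 512, "Max": 1024 },
      "UnusedRangeSize": { "Min": 128, "Avg": 256, "Max": 384 }
    }

The Min/Avg/Max sub-objects appear only when allocationCount > 1 and
unusedRangeCount > 1, respectively.
*/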
7524 
7525 #endif // #if VMA_STATS_STRING_ENABLED
7526 
7527 struct VmaSuballocationItemSizeLess
7528 {
7529  bool operator()(
7530  const VmaSuballocationList::iterator lhs,
7531  const VmaSuballocationList::iterator rhs) const
7532  {
7533  return lhs->size < rhs->size;
7534  }
7535  bool operator()(
7536  const VmaSuballocationList::iterator lhs,
7537  VkDeviceSize rhsSize) const
7538  {
7539  return lhs->size < rhsSize;
7540  }
7541 };
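/*
The VkDeviceSize overload makes this comparator usable for heterogeneous
binary search over m_FreeSuballocationsBySize: a plain size can be compared
directly against the stored iterators, as in CreateAllocationRequest below
(minSize is an illustrative name):

    VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
        m_FreeSuballocationsBySize.data(),
        m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
        minSize,
        VmaSuballocationItemSizeLess());
*/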
7542 
7543 
7544 ////////////////////////////////////////////////////////////////////////////////
7545 // class VmaBlockMetadata
7546 
7547 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7548  m_Size(0),
7549  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7550 {
7551 }
7552 
7553 #if VMA_STATS_STRING_ENABLED
7554 
7555 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7556  VkDeviceSize unusedBytes,
7557  size_t allocationCount,
7558  size_t unusedRangeCount) const
7559 {
7560  json.BeginObject();
7561 
7562  json.WriteString("TotalBytes");
7563  json.WriteNumber(GetSize());
7564 
7565  json.WriteString("UnusedBytes");
7566  json.WriteNumber(unusedBytes);
7567 
7568  json.WriteString("Allocations");
7569  json.WriteNumber((uint64_t)allocationCount);
7570 
7571  json.WriteString("UnusedRanges");
7572  json.WriteNumber((uint64_t)unusedRangeCount);
7573 
7574  json.WriteString("Suballocations");
7575  json.BeginArray();
7576 }
7577 
7578 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7579  VkDeviceSize offset,
7580  VmaAllocation hAllocation) const
7581 {
7582  json.BeginObject(true);
7583 
7584  json.WriteString("Offset");
7585  json.WriteNumber(offset);
7586 
7587  hAllocation->PrintParameters(json);
7588 
7589  json.EndObject();
7590 }
7591 
7592 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7593  VkDeviceSize offset,
7594  VkDeviceSize size) const
7595 {
7596  json.BeginObject(true);
7597 
7598  json.WriteString("Offset");
7599  json.WriteNumber(offset);
7600 
7601  json.WriteString("Type");
7602  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7603 
7604  json.WriteString("Size");
7605  json.WriteNumber(size);
7606 
7607  json.EndObject();
7608 }
7609 
7610 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7611 {
7612  json.EndArray();
7613  json.EndObject();
7614 }
7615 
7616 #endif // #if VMA_STATS_STRING_ENABLED
7617 
7618 ////////////////////////////////////////////////////////////////////////////////
7619 // class VmaBlockMetadata_Generic
7620 
7621 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7622  VmaBlockMetadata(hAllocator),
7623  m_FreeCount(0),
7624  m_SumFreeSize(0),
7625  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7626  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7627 {
7628 }
7629 
7630 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7631 {
7632 }
7633 
7634 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7635 {
7636  VmaBlockMetadata::Init(size);
7637 
7638  m_FreeCount = 1;
7639  m_SumFreeSize = size;
7640 
7641  VmaSuballocation suballoc = {};
7642  suballoc.offset = 0;
7643  suballoc.size = size;
7644  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7645  suballoc.hAllocation = VK_NULL_HANDLE;
7646 
7647  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7648  m_Suballocations.push_back(suballoc);
7649  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7650  --suballocItem;
7651  m_FreeSuballocationsBySize.push_back(suballocItem);
7652 }
7653 
7654 bool VmaBlockMetadata_Generic::Validate() const
7655 {
7656  VMA_VALIDATE(!m_Suballocations.empty());
7657 
7658  // Expected offset of new suballocation as calculated from previous ones.
7659  VkDeviceSize calculatedOffset = 0;
7660  // Expected number of free suballocations as calculated from traversing their list.
7661  uint32_t calculatedFreeCount = 0;
7662  // Expected sum size of free suballocations as calculated from traversing their list.
7663  VkDeviceSize calculatedSumFreeSize = 0;
7664  // Expected number of free suballocations that should be registered in
7665  // m_FreeSuballocationsBySize calculated from traversing their list.
7666  size_t freeSuballocationsToRegister = 0;
7667  // True if the previously visited suballocation was free.
7668  bool prevFree = false;
7669 
7670  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7671  suballocItem != m_Suballocations.cend();
7672  ++suballocItem)
7673  {
7674  const VmaSuballocation& subAlloc = *suballocItem;
7675 
7676  // Actual offset of this suballocation doesn't match expected one.
7677  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7678 
7679  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7680  // Two adjacent free suballocations are invalid. They should be merged.
7681  VMA_VALIDATE(!prevFree || !currFree);
7682 
7683  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7684 
7685  if(currFree)
7686  {
7687  calculatedSumFreeSize += subAlloc.size;
7688  ++calculatedFreeCount;
7689  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7690  {
7691  ++freeSuballocationsToRegister;
7692  }
7693 
7694  // Margin required between allocations - every free range must be at least that large.
7695  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7696  }
7697  else
7698  {
7699  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7700  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7701 
7702  // Margin required between allocations - previous allocation must be free.
7703  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7704  }
7705 
7706  calculatedOffset += subAlloc.size;
7707  prevFree = currFree;
7708  }
7709 
7710  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7711  // match expected one.
7712  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7713 
7714  VkDeviceSize lastSize = 0;
7715  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7716  {
7717  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7718 
7719  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7720  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7721  // They must be sorted by size ascending.
7722  VMA_VALIDATE(suballocItem->size >= lastSize);
7723 
7724  lastSize = suballocItem->size;
7725  }
7726 
7727  // Check if totals match calculated values.
7728  VMA_VALIDATE(ValidateFreeSuballocationList());
7729  VMA_VALIDATE(calculatedOffset == GetSize());
7730  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7731  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7732 
7733  return true;
7734 }
7735 
7736 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7737 {
7738  if(!m_FreeSuballocationsBySize.empty())
7739  {
7740  return m_FreeSuballocationsBySize.back()->size;
7741  }
7742  else
7743  {
7744  return 0;
7745  }
7746 }
7747 
7748 bool VmaBlockMetadata_Generic::IsEmpty() const
7749 {
7750  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7751 }
7752 
7753 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7754 {
7755  outInfo.blockCount = 1;
7756 
7757  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7758  outInfo.allocationCount = rangeCount - m_FreeCount;
7759  outInfo.unusedRangeCount = m_FreeCount;
7760 
7761  outInfo.unusedBytes = m_SumFreeSize;
7762  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7763 
7764  outInfo.allocationSizeMin = UINT64_MAX;
7765  outInfo.allocationSizeMax = 0;
7766  outInfo.unusedRangeSizeMin = UINT64_MAX;
7767  outInfo.unusedRangeSizeMax = 0;
7768 
7769  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7770  suballocItem != m_Suballocations.cend();
7771  ++suballocItem)
7772  {
7773  const VmaSuballocation& suballoc = *suballocItem;
7774  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7775  {
7776  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7777  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7778  }
7779  else
7780  {
7781  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7782  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7783  }
7784  }
7785 }
7786 
7787 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7788 {
7789  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7790 
7791  inoutStats.size += GetSize();
7792  inoutStats.unusedSize += m_SumFreeSize;
7793  inoutStats.allocationCount += rangeCount - m_FreeCount;
7794  inoutStats.unusedRangeCount += m_FreeCount;
7795  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7796 }
7797 
7798 #if VMA_STATS_STRING_ENABLED
7799 
7800 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7801 {
7802  PrintDetailedMap_Begin(json,
7803  m_SumFreeSize, // unusedBytes
7804  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7805  m_FreeCount); // unusedRangeCount
7806 
7807  size_t i = 0;
7808  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7809  suballocItem != m_Suballocations.cend();
7810  ++suballocItem, ++i)
7811  {
7812  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7813  {
7814  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7815  }
7816  else
7817  {
7818  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7819  }
7820  }
7821 
7822  PrintDetailedMap_End(json);
7823 }
7824 
7825 #endif // #if VMA_STATS_STRING_ENABLED
7826 
7827 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7828  uint32_t currentFrameIndex,
7829  uint32_t frameInUseCount,
7830  VkDeviceSize bufferImageGranularity,
7831  VkDeviceSize allocSize,
7832  VkDeviceSize allocAlignment,
7833  bool upperAddress,
7834  VmaSuballocationType allocType,
7835  bool canMakeOtherLost,
7836  uint32_t strategy,
7837  VmaAllocationRequest* pAllocationRequest)
7838 {
7839  VMA_ASSERT(allocSize > 0);
7840  VMA_ASSERT(!upperAddress);
7841  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7842  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7843  VMA_HEAVY_ASSERT(Validate());
7844 
7845  // There is not enough total free space in this block to fulfill the request: Early return.
7846  if(canMakeOtherLost == false &&
7847  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7848  {
7849  return false;
7850  }
7851 
7852  // New algorithm, efficiently searching freeSuballocationsBySize.
7853  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7854  if(freeSuballocCount > 0)
7855  {
7857  {
7858  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7859  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7860  m_FreeSuballocationsBySize.data(),
7861  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7862  allocSize + 2 * VMA_DEBUG_MARGIN,
7863  VmaSuballocationItemSizeLess());
7864  size_t index = it - m_FreeSuballocationsBySize.data();
7865  for(; index < freeSuballocCount; ++index)
7866  {
7867  if(CheckAllocation(
7868  currentFrameIndex,
7869  frameInUseCount,
7870  bufferImageGranularity,
7871  allocSize,
7872  allocAlignment,
7873  allocType,
7874  m_FreeSuballocationsBySize[index],
7875  false, // canMakeOtherLost
7876  &pAllocationRequest->offset,
7877  &pAllocationRequest->itemsToMakeLostCount,
7878  &pAllocationRequest->sumFreeSize,
7879  &pAllocationRequest->sumItemSize))
7880  {
7881  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7882  return true;
7883  }
7884  }
7885  }
7886  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7887  {
7888  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7889  it != m_Suballocations.end();
7890  ++it)
7891  {
7892  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7893  currentFrameIndex,
7894  frameInUseCount,
7895  bufferImageGranularity,
7896  allocSize,
7897  allocAlignment,
7898  allocType,
7899  it,
7900  false, // canMakeOtherLost
7901  &pAllocationRequest->offset,
7902  &pAllocationRequest->itemsToMakeLostCount,
7903  &pAllocationRequest->sumFreeSize,
7904  &pAllocationRequest->sumItemSize))
7905  {
7906  pAllocationRequest->item = it;
7907  return true;
7908  }
7909  }
7910  }
7911  else // WORST_FIT, FIRST_FIT
7912  {
7913  // Search starting from the biggest suballocations.
7914  for(size_t index = freeSuballocCount; index--; )
7915  {
7916  if(CheckAllocation(
7917  currentFrameIndex,
7918  frameInUseCount,
7919  bufferImageGranularity,
7920  allocSize,
7921  allocAlignment,
7922  allocType,
7923  m_FreeSuballocationsBySize[index],
7924  false, // canMakeOtherLost
7925  &pAllocationRequest->offset,
7926  &pAllocationRequest->itemsToMakeLostCount,
7927  &pAllocationRequest->sumFreeSize,
7928  &pAllocationRequest->sumItemSize))
7929  {
7930  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7931  return true;
7932  }
7933  }
7934  }
7935  }
7936 
7937  if(canMakeOtherLost)
7938  {
7939  // Brute-force algorithm. TODO: Come up with something better.
7940 
7941  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7942  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7943 
7944  VmaAllocationRequest tmpAllocRequest = {};
7945  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7946  suballocIt != m_Suballocations.end();
7947  ++suballocIt)
7948  {
7949  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7950  suballocIt->hAllocation->CanBecomeLost())
7951  {
7952  if(CheckAllocation(
7953  currentFrameIndex,
7954  frameInUseCount,
7955  bufferImageGranularity,
7956  allocSize,
7957  allocAlignment,
7958  allocType,
7959  suballocIt,
7960  canMakeOtherLost,
7961  &tmpAllocRequest.offset,
7962  &tmpAllocRequest.itemsToMakeLostCount,
7963  &tmpAllocRequest.sumFreeSize,
7964  &tmpAllocRequest.sumItemSize))
7965  {
7966  tmpAllocRequest.item = suballocIt;
7967 
7968  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7969  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7970  {
7971  *pAllocationRequest = tmpAllocRequest;
7972  }
7973  }
7974  }
7975  }
7976 
7977  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7978  {
7979  return true;
7980  }
7981  }
7982 
7983  return false;
7984 }
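/*
Summary of the search strategies above:
- BEST_FIT: binary-search m_FreeSuballocationsBySize (sorted by size
  ascending) for the first free range >= allocSize + 2 * VMA_DEBUG_MARGIN,
  then try candidates upward until CheckAllocation() accepts one.
- MIN_OFFSET (internal, used by defragmentation): linear scan of all
  suballocations in address order, taking the first free range that fits.
- WORST_FIT / FIRST_FIT: walk the size-sorted vector from the biggest range
  downward.
- canMakeOtherLost: brute-force pass that also considers ranges held by
  allocations that can become lost, keeping the candidate with the lowest
  CalcCost().
*/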
7985 
7986 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7987  uint32_t currentFrameIndex,
7988  uint32_t frameInUseCount,
7989  VmaAllocationRequest* pAllocationRequest)
7990 {
7991  while(pAllocationRequest->itemsToMakeLostCount > 0)
7992  {
7993  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7994  {
7995  ++pAllocationRequest->item;
7996  }
7997  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7998  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7999  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8000  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8001  {
8002  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8003  --pAllocationRequest->itemsToMakeLostCount;
8004  }
8005  else
8006  {
8007  return false;
8008  }
8009  }
8010 
8011  VMA_HEAVY_ASSERT(Validate());
8012  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8013  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8014 
8015  return true;
8016 }
8017 
8018 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8019 {
8020  uint32_t lostAllocationCount = 0;
8021  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8022  it != m_Suballocations.end();
8023  ++it)
8024  {
8025  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8026  it->hAllocation->CanBecomeLost() &&
8027  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8028  {
8029  it = FreeSuballocation(it);
8030  ++lostAllocationCount;
8031  }
8032  }
8033  return lostAllocationCount;
8034 }
8035 
8036 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8037 {
8038  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8039  it != m_Suballocations.end();
8040  ++it)
8041  {
8042  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8043  {
8044  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8045  {
8046  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8047  return VK_ERROR_VALIDATION_FAILED_EXT;
8048  }
8049  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8050  {
8051  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8052  return VK_ERROR_VALIDATION_FAILED_EXT;
8053  }
8054  }
8055  }
8056 
8057  return VK_SUCCESS;
8058 }
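/*
Layout assumed by the probes above when corruption detection is enabled
(sketch): each allocation is surrounded by VMA_DEBUG_MARGIN bytes filled with
a magic pattern (written by VmaWriteMagicValue elsewhere in this file):

    ... | magic | allocation at it->offset, it->size bytes | magic | ...
          ^ checked at it->offset - VMA_DEBUG_MARGIN
                                                    ^ checked at it->offset + it->size

A failed check means something wrote outside the bounds of its allocation.
*/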
8059 
8060 void VmaBlockMetadata_Generic::Alloc(
8061  const VmaAllocationRequest& request,
8062  VmaSuballocationType type,
8063  VkDeviceSize allocSize,
8064  bool upperAddress,
8065  VmaAllocation hAllocation)
8066 {
8067  VMA_ASSERT(!upperAddress);
8068  VMA_ASSERT(request.item != m_Suballocations.end());
8069  VmaSuballocation& suballoc = *request.item;
8070  // Given suballocation is a free block.
8071  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8072  // Given offset is inside this suballocation.
8073  VMA_ASSERT(request.offset >= suballoc.offset);
8074  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8075  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8076  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8077 
8078  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8079  // it to become used.
8080  UnregisterFreeSuballocation(request.item);
8081 
8082  suballoc.offset = request.offset;
8083  suballoc.size = allocSize;
8084  suballoc.type = type;
8085  suballoc.hAllocation = hAllocation;
8086 
8087  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8088  if(paddingEnd)
8089  {
8090  VmaSuballocation paddingSuballoc = {};
8091  paddingSuballoc.offset = request.offset + allocSize;
8092  paddingSuballoc.size = paddingEnd;
8093  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8094  VmaSuballocationList::iterator next = request.item;
8095  ++next;
8096  const VmaSuballocationList::iterator paddingEndItem =
8097  m_Suballocations.insert(next, paddingSuballoc);
8098  RegisterFreeSuballocation(paddingEndItem);
8099  }
8100 
8101  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8102  if(paddingBegin)
8103  {
8104  VmaSuballocation paddingSuballoc = {};
8105  paddingSuballoc.offset = request.offset - paddingBegin;
8106  paddingSuballoc.size = paddingBegin;
8107  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8108  const VmaSuballocationList::iterator paddingBeginItem =
8109  m_Suballocations.insert(request.item, paddingSuballoc);
8110  RegisterFreeSuballocation(paddingBeginItem);
8111  }
8112 
8113  // Update totals.
8114  m_FreeCount = m_FreeCount - 1;
8115  if(paddingBegin > 0)
8116  {
8117  ++m_FreeCount;
8118  }
8119  if(paddingEnd > 0)
8120  {
8121  ++m_FreeCount;
8122  }
8123  m_SumFreeSize -= allocSize;
8124 }
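/*
Worked example of the split above: a single free range at offset 0 with size
1000, request.offset = 256 (after alignment), allocSize = 512. Then
paddingBegin = 256 and paddingEnd = 232, so the list becomes [0,256) free,
[256,768) used, [768,1000) free; m_FreeCount changes by -1 +2 = +1 and
m_SumFreeSize drops by exactly allocSize.
*/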
8125 
8126 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8127 {
8128  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8129  suballocItem != m_Suballocations.end();
8130  ++suballocItem)
8131  {
8132  VmaSuballocation& suballoc = *suballocItem;
8133  if(suballoc.hAllocation == allocation)
8134  {
8135  FreeSuballocation(suballocItem);
8136  VMA_HEAVY_ASSERT(Validate());
8137  return;
8138  }
8139  }
8140  VMA_ASSERT(0 && "Not found!");
8141 }
8142 
8143 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8144 {
8145  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8146  suballocItem != m_Suballocations.end();
8147  ++suballocItem)
8148  {
8149  VmaSuballocation& suballoc = *suballocItem;
8150  if(suballoc.offset == offset)
8151  {
8152  FreeSuballocation(suballocItem);
8153  return;
8154  }
8155  }
8156  VMA_ASSERT(0 && "Not found!");
8157 }
8158 
8159 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8160 {
8161  typedef VmaSuballocationList::iterator iter_type;
8162  for(iter_type suballocItem = m_Suballocations.begin();
8163  suballocItem != m_Suballocations.end();
8164  ++suballocItem)
8165  {
8166  VmaSuballocation& suballoc = *suballocItem;
8167  if(suballoc.hAllocation == alloc)
8168  {
8169  iter_type nextItem = suballocItem;
8170  ++nextItem;
8171 
8172  // Should have been ensured on higher level.
8173  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8174 
8175  // Shrinking.
8176  if(newSize < alloc->GetSize())
8177  {
8178  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8179 
8180  // There is next item.
8181  if(nextItem != m_Suballocations.end())
8182  {
8183  // Next item is free.
8184  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8185  {
8186  // Grow this next item backward.
8187  UnregisterFreeSuballocation(nextItem);
8188  nextItem->offset -= sizeDiff;
8189  nextItem->size += sizeDiff;
8190  RegisterFreeSuballocation(nextItem);
8191  }
8192  // Next item is not free.
8193  else
8194  {
8195  // Create free item after current one.
8196  VmaSuballocation newFreeSuballoc;
8197  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8198  newFreeSuballoc.offset = suballoc.offset + newSize;
8199  newFreeSuballoc.size = sizeDiff;
8200  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8201  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8202  RegisterFreeSuballocation(newFreeSuballocIt);
8203 
8204  ++m_FreeCount;
8205  }
8206  }
8207  // This is the last item.
8208  else
8209  {
8210  // Create free item at the end.
8211  VmaSuballocation newFreeSuballoc;
8212  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8213  newFreeSuballoc.offset = suballoc.offset + newSize;
8214  newFreeSuballoc.size = sizeDiff;
8215  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8216  m_Suballocations.push_back(newFreeSuballoc);
8217 
8218  iter_type newFreeSuballocIt = m_Suballocations.end();
8219  RegisterFreeSuballocation(--newFreeSuballocIt);
8220 
8221  ++m_FreeCount;
8222  }
8223 
8224  suballoc.size = newSize;
8225  m_SumFreeSize += sizeDiff;
8226  }
8227  // Growing.
8228  else
8229  {
8230  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8231 
8232  // There is next item.
8233  if(nextItem != m_Suballocations.end())
8234  {
8235  // Next item is free.
8236  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8237  {
8238  // There is not enough free space, including margin.
8239  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8240  {
8241  return false;
8242  }
8243 
8244  // There is more free space than required.
8245  if(nextItem->size > sizeDiff)
8246  {
8247  // Move and shrink this next item.
8248  UnregisterFreeSuballocation(nextItem);
8249  nextItem->offset += sizeDiff;
8250  nextItem->size -= sizeDiff;
8251  RegisterFreeSuballocation(nextItem);
8252  }
8253  // There is exactly the amount of free space required.
8254  else
8255  {
8256  // Remove this next free item.
8257  UnregisterFreeSuballocation(nextItem);
8258  m_Suballocations.erase(nextItem);
8259  --m_FreeCount;
8260  }
8261  }
8262  // Next item is not free - there is no space to grow.
8263  else
8264  {
8265  return false;
8266  }
8267  }
8268  // This is the last item - there is no space to grow.
8269  else
8270  {
8271  return false;
8272  }
8273 
8274  suballoc.size = newSize;
8275  m_SumFreeSize -= sizeDiff;
8276  }
8277 
8278  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8279  return true;
8280  }
8281  }
8282  VMA_ASSERT(0 && "Not found!");
8283  return false;
8284 }
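/*
The in-place resize above in one picture (sketch): shrinking donates sizeDiff
to the following free range (growing it backward, or creating a new free
item); growing steals sizeDiff from the following free range, and returns
false when the next item is used, absent, or smaller than
sizeDiff + VMA_DEBUG_MARGIN.
*/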
8285 
8286 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8287 {
8288  VkDeviceSize lastSize = 0;
8289  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8290  {
8291  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8292 
8293  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8294  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8295  VMA_VALIDATE(it->size >= lastSize);
8296  lastSize = it->size;
8297  }
8298  return true;
8299 }
8300 
8301 bool VmaBlockMetadata_Generic::CheckAllocation(
8302  uint32_t currentFrameIndex,
8303  uint32_t frameInUseCount,
8304  VkDeviceSize bufferImageGranularity,
8305  VkDeviceSize allocSize,
8306  VkDeviceSize allocAlignment,
8307  VmaSuballocationType allocType,
8308  VmaSuballocationList::const_iterator suballocItem,
8309  bool canMakeOtherLost,
8310  VkDeviceSize* pOffset,
8311  size_t* itemsToMakeLostCount,
8312  VkDeviceSize* pSumFreeSize,
8313  VkDeviceSize* pSumItemSize) const
8314 {
8315  VMA_ASSERT(allocSize > 0);
8316  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8317  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8318  VMA_ASSERT(pOffset != VMA_NULL);
8319 
8320  *itemsToMakeLostCount = 0;
8321  *pSumFreeSize = 0;
8322  *pSumItemSize = 0;
8323 
8324  if(canMakeOtherLost)
8325  {
8326  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8327  {
8328  *pSumFreeSize = suballocItem->size;
8329  }
8330  else
8331  {
8332  if(suballocItem->hAllocation->CanBecomeLost() &&
8333  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8334  {
8335  ++*itemsToMakeLostCount;
8336  *pSumItemSize = suballocItem->size;
8337  }
8338  else
8339  {
8340  return false;
8341  }
8342  }
8343 
8344  // Remaining size is too small for this request: Early return.
8345  if(GetSize() - suballocItem->offset < allocSize)
8346  {
8347  return false;
8348  }
8349 
8350  // Start from offset equal to beginning of this suballocation.
8351  *pOffset = suballocItem->offset;
8352 
8353  // Apply VMA_DEBUG_MARGIN at the beginning.
8354  if(VMA_DEBUG_MARGIN > 0)
8355  {
8356  *pOffset += VMA_DEBUG_MARGIN;
8357  }
8358 
8359  // Apply alignment.
8360  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8361 
8362  // Check previous suballocations for BufferImageGranularity conflicts.
8363  // Make bigger alignment if necessary.
8364  if(bufferImageGranularity > 1)
8365  {
8366  bool bufferImageGranularityConflict = false;
8367  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8368  while(prevSuballocItem != m_Suballocations.cbegin())
8369  {
8370  --prevSuballocItem;
8371  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8372  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8373  {
8374  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8375  {
8376  bufferImageGranularityConflict = true;
8377  break;
8378  }
8379  }
8380  else
8381  // Already on previous page.
8382  break;
8383  }
8384  if(bufferImageGranularityConflict)
8385  {
8386  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8387  }
8388  }
8389 
8390  // Now that we have final *pOffset, check if we are past suballocItem.
8391  // If yes, return false - this function should be called for another suballocItem as starting point.
8392  if(*pOffset >= suballocItem->offset + suballocItem->size)
8393  {
8394  return false;
8395  }
8396 
8397  // Calculate padding at the beginning based on current offset.
8398  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8399 
8400  // Calculate required margin at the end.
8401  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8402 
8403  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8404  // Another early return check.
8405  if(suballocItem->offset + totalSize > GetSize())
8406  {
8407  return false;
8408  }
8409 
8410  // Advance lastSuballocItem until desired size is reached.
8411  // Update itemsToMakeLostCount.
8412  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8413  if(totalSize > suballocItem->size)
8414  {
8415  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8416  while(remainingSize > 0)
8417  {
8418  ++lastSuballocItem;
8419  if(lastSuballocItem == m_Suballocations.cend())
8420  {
8421  return false;
8422  }
8423  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8424  {
8425  *pSumFreeSize += lastSuballocItem->size;
8426  }
8427  else
8428  {
8429  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8430  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8431  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8432  {
8433  ++*itemsToMakeLostCount;
8434  *pSumItemSize += lastSuballocItem->size;
8435  }
8436  else
8437  {
8438  return false;
8439  }
8440  }
8441  remainingSize = (lastSuballocItem->size < remainingSize) ?
8442  remainingSize - lastSuballocItem->size : 0;
8443  }
8444  }
8445 
8446  // Check next suballocations for BufferImageGranularity conflicts.
8447  // If conflict exists, we must mark more allocations lost or fail.
8448  if(bufferImageGranularity > 1)
8449  {
8450  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8451  ++nextSuballocItem;
8452  while(nextSuballocItem != m_Suballocations.cend())
8453  {
8454  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8455  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8456  {
8457  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8458  {
8459  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8460  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8461  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8462  {
8463  ++*itemsToMakeLostCount;
8464  }
8465  else
8466  {
8467  return false;
8468  }
8469  }
8470  }
8471  else
8472  {
8473  // Already on next page.
8474  break;
8475  }
8476  ++nextSuballocItem;
8477  }
8478  }
8479  }
8480  else
8481  {
8482  const VmaSuballocation& suballoc = *suballocItem;
8483  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8484 
8485  *pSumFreeSize = suballoc.size;
8486 
8487  // Size of this suballocation is too small for this request: Early return.
8488  if(suballoc.size < allocSize)
8489  {
8490  return false;
8491  }
8492 
8493  // Start from offset equal to beginning of this suballocation.
8494  *pOffset = suballoc.offset;
8495 
8496  // Apply VMA_DEBUG_MARGIN at the beginning.
8497  if(VMA_DEBUG_MARGIN > 0)
8498  {
8499  *pOffset += VMA_DEBUG_MARGIN;
8500  }
8501 
8502  // Apply alignment.
8503  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8504 
8505  // Check previous suballocations for BufferImageGranularity conflicts.
8506  // Make bigger alignment if necessary.
8507  if(bufferImageGranularity > 1)
8508  {
8509  bool bufferImageGranularityConflict = false;
8510  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8511  while(prevSuballocItem != m_Suballocations.cbegin())
8512  {
8513  --prevSuballocItem;
8514  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8515  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8516  {
8517  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8518  {
8519  bufferImageGranularityConflict = true;
8520  break;
8521  }
8522  }
8523  else
8524  // Already on previous page.
8525  break;
8526  }
8527  if(bufferImageGranularityConflict)
8528  {
8529  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8530  }
8531  }
8532 
8533  // Calculate padding at the beginning based on current offset.
8534  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8535 
8536  // Calculate required margin at the end.
8537  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8538 
8539  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8540  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8541  {
8542  return false;
8543  }
8544 
8545  // Check next suballocations for BufferImageGranularity conflicts.
8546  // If conflict exists, allocation cannot be made here.
8547  if(bufferImageGranularity > 1)
8548  {
8549  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8550  ++nextSuballocItem;
8551  while(nextSuballocItem != m_Suballocations.cend())
8552  {
8553  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8554  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8555  {
8556  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8557  {
8558  return false;
8559  }
8560  }
8561  else
8562  {
8563  // Already on next page.
8564  break;
8565  }
8566  ++nextSuballocItem;
8567  }
8568  }
8569  }
8570 
8571  // All tests passed: Success. pOffset is already filled.
8572  return true;
8573 }
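/*
A worked example of the offset pipeline above, with assumed numbers:
a free suballocation at offset 100, VMA_DEBUG_MARGIN = 16,
allocAlignment = 64, bufferImageGranularity = 1 (no granularity bump):

\code
VkDeviceSize offset = 100;                      // suballoc.offset
offset += 16;                                   // debug margin -> 116
offset = VmaAlignUp(offset, (VkDeviceSize)64);  // -> 128
// paddingBegin = 128 - 100 = 28; the request fits only if
// 28 + allocSize + 16 <= suballoc.size.
\endcode

With a larger bufferImageGranularity (say 1024) and a previous
suballocation of a conflicting type on the same 1024-byte page, the offset
would additionally be aligned up to 1024 before the size check.
*/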
8574 
8575 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8576 {
8577  VMA_ASSERT(item != m_Suballocations.end());
8578  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8579 
8580  VmaSuballocationList::iterator nextItem = item;
8581  ++nextItem;
8582  VMA_ASSERT(nextItem != m_Suballocations.end());
8583  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8584 
8585  item->size += nextItem->size;
8586  --m_FreeCount;
8587  m_Suballocations.erase(nextItem);
8588 }
8589 
8590 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8591 {
8592  // Change this suballocation to be marked as free.
8593  VmaSuballocation& suballoc = *suballocItem;
8594  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8595  suballoc.hAllocation = VK_NULL_HANDLE;
8596 
8597  // Update totals.
8598  ++m_FreeCount;
8599  m_SumFreeSize += suballoc.size;
8600 
8601  // Merge with previous and/or next suballocation if it's also free.
8602  bool mergeWithNext = false;
8603  bool mergeWithPrev = false;
8604 
8605  VmaSuballocationList::iterator nextItem = suballocItem;
8606  ++nextItem;
8607  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8608  {
8609  mergeWithNext = true;
8610  }
8611 
8612  VmaSuballocationList::iterator prevItem = suballocItem;
8613  if(suballocItem != m_Suballocations.begin())
8614  {
8615  --prevItem;
8616  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8617  {
8618  mergeWithPrev = true;
8619  }
8620  }
8621 
8622  if(mergeWithNext)
8623  {
8624  UnregisterFreeSuballocation(nextItem);
8625  MergeFreeWithNext(suballocItem);
8626  }
8627 
8628  if(mergeWithPrev)
8629  {
8630  UnregisterFreeSuballocation(prevItem);
8631  MergeFreeWithNext(prevItem);
8632  RegisterFreeSuballocation(prevItem);
8633  return prevItem;
8634  }
8635  else
8636  {
8637  RegisterFreeSuballocation(suballocItem);
8638  return suballocItem;
8639  }
8640 }
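/*
The merge logic above collapses up to three adjacent list nodes into one.
Sketch of the layouts it handles (A = the suballocation being freed,
F = an already-free neighbor):

\code
// [used][A][F]   -> merge next into A:     [used][A+F]
// [F][A][used]   -> merge A into prev:     [F+A][used]
// [F][A][F]      -> both merges fire:      [F+A+F]
\endcode

Each free neighbor is unregistered from m_FreeSuballocationsBySize before
its node is erased, and only the surviving node is re-registered, so the
size-sorted vector never holds a dangling iterator or a stale size.
*/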
8641 
8642 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8643 {
8644  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8645  VMA_ASSERT(item->size > 0);
8646 
8647  // You may want to enable this validation at the beginning or at the end of
8648  // this function, depending on what you want to check.
8649  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8650 
8651  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8652  {
8653  if(m_FreeSuballocationsBySize.empty())
8654  {
8655  m_FreeSuballocationsBySize.push_back(item);
8656  }
8657  else
8658  {
8659  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8660  }
8661  }
8662 
8663  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8664 }
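/*
Only free ranges of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER
bytes (16 in stock builds of this library) enter the size-sorted vector;
smaller slivers stay in the list and still count toward m_SumFreeSize,
they are just never offered to the best-fit search.
VmaVectorInsertSorted keeps the vector ordered on insert; a stand-alone
sketch of the equivalent over plain sizes:

\code
#include <algorithm>
#include <vector>

static void InsertSorted(std::vector<VkDeviceSize>& sizes, VkDeviceSize s)
{
    // Insert before the first element greater than s, preserving
    // non-decreasing order; upper_bound keeps equal keys stable.
    sizes.insert(std::upper_bound(sizes.begin(), sizes.end(), s), s);
}
\endcode
*/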
8665 
8666 
8667 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8668 {
8669  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8670  VMA_ASSERT(item->size > 0);
8671 
8672  // You may want to enable this validation at the beginning or at the end of
8673  // this function, depending on what you want to check.
8674  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8675 
8676  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8677  {
8678  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8679  m_FreeSuballocationsBySize.data(),
8680  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8681  item,
8682  VmaSuballocationItemSizeLess());
8683  for(size_t index = it - m_FreeSuballocationsBySize.data();
8684  index < m_FreeSuballocationsBySize.size();
8685  ++index)
8686  {
8687  if(m_FreeSuballocationsBySize[index] == item)
8688  {
8689  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8690  return;
8691  }
8692  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8693  }
8694  VMA_ASSERT(0 && "Not found.");
8695  }
8696 
8697  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8698 }
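/*
VmaBinaryFindFirstNotLess above behaves like std::lower_bound keyed on
size: it lands on the first entry whose size is not less than item->size,
and the loop then scans the run of equal-sized entries for the exact
iterator (hence the "sizes still equal" assertion). A stand-alone sketch
of the lookup over plain sizes:

\code
#include <algorithm>
#include <vector>

static size_t FindFirstNotLess(const std::vector<VkDeviceSize>& sizes,
    VkDeviceSize key)
{
    // First index i with sizes[i] >= key; the sought item can only live
    // in the equal-size run starting there.
    return (size_t)(std::lower_bound(sizes.begin(), sizes.end(), key) -
        sizes.begin());
}
\endcode
*/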
8699 
8700 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8701  VkDeviceSize bufferImageGranularity,
8702  VmaSuballocationType& inOutPrevSuballocType) const
8703 {
8704  if(bufferImageGranularity == 1 || IsEmpty())
8705  {
8706  return false;
8707  }
8708 
8709  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8710  bool typeConflictFound = false;
8711  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8712  it != m_Suballocations.cend();
8713  ++it)
8714  {
8715  const VmaSuballocationType suballocType = it->type;
8716  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8717  {
8718  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8719  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8720  {
8721  typeConflictFound = true;
8722  }
8723  inOutPrevSuballocType = suballocType;
8724  }
8725  }
8726 
8727  return typeConflictFound || minAlignment >= bufferImageGranularity;
8728 }
8729 
8730 ////////////////////////////////////////////////////////////////////////////////
8731 // class VmaBlockMetadata_Linear
8732 
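/*
The linear metadata keeps two suballocation vectors and interprets them
according to m_2ndVectorMode. A sketch of the three layouts over one block
(offsets grow left to right, '#' = allocation, '_' = free):

\code
// SECOND_VECTOR_EMPTY:        | 1st: ####__________ |
//   Only the 1st vector is used; allocations append at its end.
//
// SECOND_VECTOR_RING_BUFFER:  | 2nd: ##____ 1st: ## |
//   Allocations wrapped around to offset 0; 2nd grows toward the
//   beginning of 1st, like a circular queue.
//
// SECOND_VECTOR_DOUBLE_STACK: | 1st: ####____## :2nd |
//   1st grows up from offset 0, 2nd grows down from the block end
//   (upper-address allocations).
\endcode

m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount and m_2ndNullItemsCount
track freed ("null") entries that remain in the vectors and are compacted
lazily by CleanupAfterFree().
*/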
8733 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8734  VmaBlockMetadata(hAllocator),
8735  m_SumFreeSize(0),
8736  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8737  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8738  m_1stVectorIndex(0),
8739  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8740  m_1stNullItemsBeginCount(0),
8741  m_1stNullItemsMiddleCount(0),
8742  m_2ndNullItemsCount(0)
8743 {
8744 }
8745 
8746 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8747 {
8748 }
8749 
8750 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8751 {
8752  VmaBlockMetadata::Init(size);
8753  m_SumFreeSize = size;
8754 }
8755 
8756 bool VmaBlockMetadata_Linear::Validate() const
8757 {
8758  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8759  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8760 
8761  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8762  VMA_VALIDATE(!suballocations1st.empty() ||
8763  suballocations2nd.empty() ||
8764  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8765 
8766  if(!suballocations1st.empty())
8767  {
8768  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8769  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8770  // A null item at the end should have been removed by pop_back().
8771  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8772  }
8773  if(!suballocations2nd.empty())
8774  {
8775  // A null item at the end should have been removed by pop_back().
8776  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8777  }
8778 
8779  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8780  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8781 
8782  VkDeviceSize sumUsedSize = 0;
8783  const size_t suballoc1stCount = suballocations1st.size();
8784  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8785 
8786  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8787  {
8788  const size_t suballoc2ndCount = suballocations2nd.size();
8789  size_t nullItem2ndCount = 0;
8790  for(size_t i = 0; i < suballoc2ndCount; ++i)
8791  {
8792  const VmaSuballocation& suballoc = suballocations2nd[i];
8793  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8794 
8795  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8796  VMA_VALIDATE(suballoc.offset >= offset);
8797 
8798  if(!currFree)
8799  {
8800  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8801  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8802  sumUsedSize += suballoc.size;
8803  }
8804  else
8805  {
8806  ++nullItem2ndCount;
8807  }
8808 
8809  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8810  }
8811 
8812  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8813  }
8814 
8815  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8816  {
8817  const VmaSuballocation& suballoc = suballocations1st[i];
8818  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8819  suballoc.hAllocation == VK_NULL_HANDLE);
8820  }
8821 
8822  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8823 
8824  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8825  {
8826  const VmaSuballocation& suballoc = suballocations1st[i];
8827  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8828 
8829  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8830  VMA_VALIDATE(suballoc.offset >= offset);
8831  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8832 
8833  if(!currFree)
8834  {
8835  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8836  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8837  sumUsedSize += suballoc.size;
8838  }
8839  else
8840  {
8841  ++nullItem1stCount;
8842  }
8843 
8844  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8845  }
8846  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8847 
8848  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8849  {
8850  const size_t suballoc2ndCount = suballocations2nd.size();
8851  size_t nullItem2ndCount = 0;
8852  for(size_t i = suballoc2ndCount; i--; )
8853  {
8854  const VmaSuballocation& suballoc = suballocations2nd[i];
8855  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8856 
8857  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8858  VMA_VALIDATE(suballoc.offset >= offset);
8859 
8860  if(!currFree)
8861  {
8862  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8863  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8864  sumUsedSize += suballoc.size;
8865  }
8866  else
8867  {
8868  ++nullItem2ndCount;
8869  }
8870 
8871  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8872  }
8873 
8874  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8875  }
8876 
8877  VMA_VALIDATE(offset <= GetSize());
8878  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8879 
8880  return true;
8881 }
8882 
8883 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8884 {
8885  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8886  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8887 }
8888 
8889 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8890 {
8891  const VkDeviceSize size = GetSize();
8892 
8893  /*
8894  We don't consider gaps inside allocation vectors with freed allocations because
8895  they are not suitable for reuse in a linear allocator. We consider only space that
8896  is available for new allocations.
8897  */
8898  if(IsEmpty())
8899  {
8900  return size;
8901  }
8902 
8903  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8904 
8905  switch(m_2ndVectorMode)
8906  {
8907  case SECOND_VECTOR_EMPTY:
8908  /*
8909  Available space is after the end of 1st, as well as before the beginning of 1st (which
8910  would make it a ring buffer).
8911  */
8912  {
8913  const size_t suballocations1stCount = suballocations1st.size();
8914  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8915  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8916  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8917  return VMA_MAX(
8918  firstSuballoc.offset,
8919  size - (lastSuballoc.offset + lastSuballoc.size));
8920  }
8921  break;
8922 
8923  case SECOND_VECTOR_RING_BUFFER:
8924  /*
8925  Available space is only between end of 2nd and beginning of 1st.
8926  */
8927  {
8928  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8929  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8930  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8931  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8932  }
8933  break;
8934 
8935  case SECOND_VECTOR_DOUBLE_STACK:
8936  /*
8937  Available space is only between end of 1st and top of 2nd.
8938  */
8939  {
8940  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8941  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8942  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8943  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8944  }
8945  break;
8946 
8947  default:
8948  VMA_ASSERT(0);
8949  return 0;
8950  }
8951 }
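/*
Worked example for the SECOND_VECTOR_EMPTY case above, with assumed
numbers: block size 1000, first used suballocation at offset 100, last one
ending at offset 900. The two candidate gaps are [0,100) before the 1st
vector and [900,1000) after it:

\code
VkDeviceSize gapBefore = 100;         // firstSuballoc.offset
VkDeviceSize gapAfter  = 1000 - 900;  // size - (lastSuballoc.offset + lastSuballoc.size)
VkDeviceSize unusedMax = VMA_MAX(gapBefore, gapAfter); // == 100
\endcode

Gaps left behind by freed entries inside the vectors are deliberately
ignored: the linear algorithm never allocates into them.
*/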
8952 
8953 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8954 {
8955  const VkDeviceSize size = GetSize();
8956  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8957  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8958  const size_t suballoc1stCount = suballocations1st.size();
8959  const size_t suballoc2ndCount = suballocations2nd.size();
8960 
8961  outInfo.blockCount = 1;
8962  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8963  outInfo.unusedRangeCount = 0;
8964  outInfo.usedBytes = 0;
8965  outInfo.allocationSizeMin = UINT64_MAX;
8966  outInfo.allocationSizeMax = 0;
8967  outInfo.unusedRangeSizeMin = UINT64_MAX;
8968  outInfo.unusedRangeSizeMax = 0;
8969 
8970  VkDeviceSize lastOffset = 0;
8971 
8972  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8973  {
8974  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8975  size_t nextAlloc2ndIndex = 0;
8976  while(lastOffset < freeSpace2ndTo1stEnd)
8977  {
8978  // Find next non-null allocation or move nextAllocIndex to the end.
8979  while(nextAlloc2ndIndex < suballoc2ndCount &&
8980  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8981  {
8982  ++nextAlloc2ndIndex;
8983  }
8984 
8985  // Found non-null allocation.
8986  if(nextAlloc2ndIndex < suballoc2ndCount)
8987  {
8988  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8989 
8990  // 1. Process free space before this allocation.
8991  if(lastOffset < suballoc.offset)
8992  {
8993  // There is free space from lastOffset to suballoc.offset.
8994  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8995  ++outInfo.unusedRangeCount;
8996  outInfo.unusedBytes += unusedRangeSize;
8997  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8998  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8999  }
9000 
9001  // 2. Process this allocation.
9002  // There is allocation with suballoc.offset, suballoc.size.
9003  outInfo.usedBytes += suballoc.size;
9004  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9005  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9006 
9007  // 3. Prepare for next iteration.
9008  lastOffset = suballoc.offset + suballoc.size;
9009  ++nextAlloc2ndIndex;
9010  }
9011  // We are at the end.
9012  else
9013  {
9014  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9015  if(lastOffset < freeSpace2ndTo1stEnd)
9016  {
9017  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9018  ++outInfo.unusedRangeCount;
9019  outInfo.unusedBytes += unusedRangeSize;
9020  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9021  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9022  }
9023 
9024  // End of loop.
9025  lastOffset = freeSpace2ndTo1stEnd;
9026  }
9027  }
9028  }
9029 
9030  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9031  const VkDeviceSize freeSpace1stTo2ndEnd =
9032  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9033  while(lastOffset < freeSpace1stTo2ndEnd)
9034  {
9035  // Find next non-null allocation or move nextAllocIndex to the end.
9036  while(nextAlloc1stIndex < suballoc1stCount &&
9037  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9038  {
9039  ++nextAlloc1stIndex;
9040  }
9041 
9042  // Found non-null allocation.
9043  if(nextAlloc1stIndex < suballoc1stCount)
9044  {
9045  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9046 
9047  // 1. Process free space before this allocation.
9048  if(lastOffset < suballoc.offset)
9049  {
9050  // There is free space from lastOffset to suballoc.offset.
9051  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9052  ++outInfo.unusedRangeCount;
9053  outInfo.unusedBytes += unusedRangeSize;
9054  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9055  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9056  }
9057 
9058  // 2. Process this allocation.
9059  // There is allocation with suballoc.offset, suballoc.size.
9060  outInfo.usedBytes += suballoc.size;
9061  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9062  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9063 
9064  // 3. Prepare for next iteration.
9065  lastOffset = suballoc.offset + suballoc.size;
9066  ++nextAlloc1stIndex;
9067  }
9068  // We are at the end.
9069  else
9070  {
9071  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9072  if(lastOffset < freeSpace1stTo2ndEnd)
9073  {
9074  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9075  ++outInfo.unusedRangeCount;
9076  outInfo.unusedBytes += unusedRangeSize;
9077  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9078  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9079  }
9080 
9081  // End of loop.
9082  lastOffset = freeSpace1stTo2ndEnd;
9083  }
9084  }
9085 
9086  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9087  {
9088  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9089  while(lastOffset < size)
9090  {
9091  // Find next non-null allocation or move nextAllocIndex to the end.
9092  while(nextAlloc2ndIndex != SIZE_MAX &&
9093  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9094  {
9095  --nextAlloc2ndIndex;
9096  }
9097 
9098  // Found non-null allocation.
9099  if(nextAlloc2ndIndex != SIZE_MAX)
9100  {
9101  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9102 
9103  // 1. Process free space before this allocation.
9104  if(lastOffset < suballoc.offset)
9105  {
9106  // There is free space from lastOffset to suballoc.offset.
9107  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9108  ++outInfo.unusedRangeCount;
9109  outInfo.unusedBytes += unusedRangeSize;
9110  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9111  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9112  }
9113 
9114  // 2. Process this allocation.
9115  // There is allocation with suballoc.offset, suballoc.size.
9116  outInfo.usedBytes += suballoc.size;
9117  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9118  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9119 
9120  // 3. Prepare for next iteration.
9121  lastOffset = suballoc.offset + suballoc.size;
9122  --nextAlloc2ndIndex;
9123  }
9124  // We are at the end.
9125  else
9126  {
9127  // There is free space from lastOffset to size.
9128  if(lastOffset < size)
9129  {
9130  const VkDeviceSize unusedRangeSize = size - lastOffset;
9131  ++outInfo.unusedRangeCount;
9132  outInfo.unusedBytes += unusedRangeSize;
9133  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9134  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9135  }
9136 
9137  // End of loop.
9138  lastOffset = size;
9139  }
9140  }
9141  }
9142 
9143  outInfo.unusedBytes = size - outInfo.usedBytes;
9144 }
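/*
The statistics above are a standard min/max fold: minima start at
UINT64_MAX and shrink with VMA_MIN, maxima start at 0 and grow with
VMA_MAX. A stand-alone sketch of the same reduction over range sizes:

\code
#include <cstdint>

static void FoldRange(uint64_t rangeSize, uint64_t& inoutMin, uint64_t& inoutMax)
{
    // Callers initialize inoutMin = UINT64_MAX and inoutMax = 0 so the
    // first range seen sets both bounds.
    if(rangeSize < inoutMin) inoutMin = rangeSize;
    if(rangeSize > inoutMax) inoutMax = rangeSize;
}
\endcode
*/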
9145 
9146 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9147 {
9148  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9149  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9150  const VkDeviceSize size = GetSize();
9151  const size_t suballoc1stCount = suballocations1st.size();
9152  const size_t suballoc2ndCount = suballocations2nd.size();
9153 
9154  inoutStats.size += size;
9155 
9156  VkDeviceSize lastOffset = 0;
9157 
9158  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9159  {
9160  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9161  size_t nextAlloc2ndIndex = 0;
9162  while(lastOffset < freeSpace2ndTo1stEnd)
9163  {
9164  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9165  while(nextAlloc2ndIndex < suballoc2ndCount &&
9166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9167  {
9168  ++nextAlloc2ndIndex;
9169  }
9170 
9171  // Found non-null allocation.
9172  if(nextAlloc2ndIndex < suballoc2ndCount)
9173  {
9174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9175 
9176  // 1. Process free space before this allocation.
9177  if(lastOffset < suballoc.offset)
9178  {
9179  // There is free space from lastOffset to suballoc.offset.
9180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9181  inoutStats.unusedSize += unusedRangeSize;
9182  ++inoutStats.unusedRangeCount;
9183  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9184  }
9185 
9186  // 2. Process this allocation.
9187  // There is allocation with suballoc.offset, suballoc.size.
9188  ++inoutStats.allocationCount;
9189 
9190  // 3. Prepare for next iteration.
9191  lastOffset = suballoc.offset + suballoc.size;
9192  ++nextAlloc2ndIndex;
9193  }
9194  // We are at the end.
9195  else
9196  {
9197  if(lastOffset < freeSpace2ndTo1stEnd)
9198  {
9199  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9200  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9201  inoutStats.unusedSize += unusedRangeSize;
9202  ++inoutStats.unusedRangeCount;
9203  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9204  }
9205 
9206  // End of loop.
9207  lastOffset = freeSpace2ndTo1stEnd;
9208  }
9209  }
9210  }
9211 
9212  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9213  const VkDeviceSize freeSpace1stTo2ndEnd =
9214  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9215  while(lastOffset < freeSpace1stTo2ndEnd)
9216  {
9217  // Find next non-null allocation or move nextAllocIndex to the end.
9218  while(nextAlloc1stIndex < suballoc1stCount &&
9219  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9220  {
9221  ++nextAlloc1stIndex;
9222  }
9223 
9224  // Found non-null allocation.
9225  if(nextAlloc1stIndex < suballoc1stCount)
9226  {
9227  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9228 
9229  // 1. Process free space before this allocation.
9230  if(lastOffset < suballoc.offset)
9231  {
9232  // There is free space from lastOffset to suballoc.offset.
9233  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9234  inoutStats.unusedSize += unusedRangeSize;
9235  ++inoutStats.unusedRangeCount;
9236  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9237  }
9238 
9239  // 2. Process this allocation.
9240  // There is allocation with suballoc.offset, suballoc.size.
9241  ++inoutStats.allocationCount;
9242 
9243  // 3. Prepare for next iteration.
9244  lastOffset = suballoc.offset + suballoc.size;
9245  ++nextAlloc1stIndex;
9246  }
9247  // We are at the end.
9248  else
9249  {
9250  if(lastOffset < freeSpace1stTo2ndEnd)
9251  {
9252  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9253  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9254  inoutStats.unusedSize += unusedRangeSize;
9255  ++inoutStats.unusedRangeCount;
9256  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9257  }
9258 
9259  // End of loop.
9260  lastOffset = freeSpace1stTo2ndEnd;
9261  }
9262  }
9263 
9264  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9265  {
9266  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9267  while(lastOffset < size)
9268  {
9269  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9270  while(nextAlloc2ndIndex != SIZE_MAX &&
9271  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9272  {
9273  --nextAlloc2ndIndex;
9274  }
9275 
9276  // Found non-null allocation.
9277  if(nextAlloc2ndIndex != SIZE_MAX)
9278  {
9279  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9280 
9281  // 1. Process free space before this allocation.
9282  if(lastOffset < suballoc.offset)
9283  {
9284  // There is free space from lastOffset to suballoc.offset.
9285  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9286  inoutStats.unusedSize += unusedRangeSize;
9287  ++inoutStats.unusedRangeCount;
9288  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9289  }
9290 
9291  // 2. Process this allocation.
9292  // There is allocation with suballoc.offset, suballoc.size.
9293  ++inoutStats.allocationCount;
9294 
9295  // 3. Prepare for next iteration.
9296  lastOffset = suballoc.offset + suballoc.size;
9297  --nextAlloc2ndIndex;
9298  }
9299  // We are at the end.
9300  else
9301  {
9302  if(lastOffset < size)
9303  {
9304  // There is free space from lastOffset to size.
9305  const VkDeviceSize unusedRangeSize = size - lastOffset;
9306  inoutStats.unusedSize += unusedRangeSize;
9307  ++inoutStats.unusedRangeCount;
9308  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9309  }
9310 
9311  // End of loop.
9312  lastOffset = size;
9313  }
9314  }
9315  }
9316 }
9317 
9318 #if VMA_STATS_STRING_ENABLED
9319 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9320 {
9321  const VkDeviceSize size = GetSize();
9322  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9323  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9324  const size_t suballoc1stCount = suballocations1st.size();
9325  const size_t suballoc2ndCount = suballocations2nd.size();
9326 
9327  // FIRST PASS
9328 
9329  size_t unusedRangeCount = 0;
9330  VkDeviceSize usedBytes = 0;
9331 
9332  VkDeviceSize lastOffset = 0;
9333 
9334  size_t alloc2ndCount = 0;
9335  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9336  {
9337  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9338  size_t nextAlloc2ndIndex = 0;
9339  while(lastOffset < freeSpace2ndTo1stEnd)
9340  {
9341  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9342  while(nextAlloc2ndIndex < suballoc2ndCount &&
9343  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9344  {
9345  ++nextAlloc2ndIndex;
9346  }
9347 
9348  // Found non-null allocation.
9349  if(nextAlloc2ndIndex < suballoc2ndCount)
9350  {
9351  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9352 
9353  // 1. Process free space before this allocation.
9354  if(lastOffset < suballoc.offset)
9355  {
9356  // There is free space from lastOffset to suballoc.offset.
9357  ++unusedRangeCount;
9358  }
9359 
9360  // 2. Process this allocation.
9361  // There is allocation with suballoc.offset, suballoc.size.
9362  ++alloc2ndCount;
9363  usedBytes += suballoc.size;
9364 
9365  // 3. Prepare for next iteration.
9366  lastOffset = suballoc.offset + suballoc.size;
9367  ++nextAlloc2ndIndex;
9368  }
9369  // We are at the end.
9370  else
9371  {
9372  if(lastOffset < freeSpace2ndTo1stEnd)
9373  {
9374  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9375  ++unusedRangeCount;
9376  }
9377 
9378  // End of loop.
9379  lastOffset = freeSpace2ndTo1stEnd;
9380  }
9381  }
9382  }
9383 
9384  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9385  size_t alloc1stCount = 0;
9386  const VkDeviceSize freeSpace1stTo2ndEnd =
9387  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9388  while(lastOffset < freeSpace1stTo2ndEnd)
9389  {
9390  // Find next non-null allocation or move nextAllocIndex to the end.
9391  while(nextAlloc1stIndex < suballoc1stCount &&
9392  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9393  {
9394  ++nextAlloc1stIndex;
9395  }
9396 
9397  // Found non-null allocation.
9398  if(nextAlloc1stIndex < suballoc1stCount)
9399  {
9400  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9401 
9402  // 1. Process free space before this allocation.
9403  if(lastOffset < suballoc.offset)
9404  {
9405  // There is free space from lastOffset to suballoc.offset.
9406  ++unusedRangeCount;
9407  }
9408 
9409  // 2. Process this allocation.
9410  // There is allocation with suballoc.offset, suballoc.size.
9411  ++alloc1stCount;
9412  usedBytes += suballoc.size;
9413 
9414  // 3. Prepare for next iteration.
9415  lastOffset = suballoc.offset + suballoc.size;
9416  ++nextAlloc1stIndex;
9417  }
9418  // We are at the end.
9419  else
9420  {
9421  if(lastOffset < freeSpace1stTo2ndEnd)
9422  {
9423  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9424  ++unusedRangeCount;
9425  }
9426 
9427  // End of loop.
9428  lastOffset = freeSpace1stTo2ndEnd;
9429  }
9430  }
9431 
9432  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9433  {
9434  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9435  while(lastOffset < size)
9436  {
9437  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9438  while(nextAlloc2ndIndex != SIZE_MAX &&
9439  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9440  {
9441  --nextAlloc2ndIndex;
9442  }
9443 
9444  // Found non-null allocation.
9445  if(nextAlloc2ndIndex != SIZE_MAX)
9446  {
9447  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9448 
9449  // 1. Process free space before this allocation.
9450  if(lastOffset < suballoc.offset)
9451  {
9452  // There is free space from lastOffset to suballoc.offset.
9453  ++unusedRangeCount;
9454  }
9455 
9456  // 2. Process this allocation.
9457  // There is allocation with suballoc.offset, suballoc.size.
9458  ++alloc2ndCount;
9459  usedBytes += suballoc.size;
9460 
9461  // 3. Prepare for next iteration.
9462  lastOffset = suballoc.offset + suballoc.size;
9463  --nextAlloc2ndIndex;
9464  }
9465  // We are at the end.
9466  else
9467  {
9468  if(lastOffset < size)
9469  {
9470  // There is free space from lastOffset to size.
9471  ++unusedRangeCount;
9472  }
9473 
9474  // End of loop.
9475  lastOffset = size;
9476  }
9477  }
9478  }
9479 
9480  const VkDeviceSize unusedBytes = size - usedBytes;
9481  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9482 
9483  // SECOND PASS
9484  lastOffset = 0;
9485 
9486  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9487  {
9488  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9489  size_t nextAlloc2ndIndex = 0;
9490  while(lastOffset < freeSpace2ndTo1stEnd)
9491  {
9492  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9493  while(nextAlloc2ndIndex < suballoc2ndCount &&
9494  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9495  {
9496  ++nextAlloc2ndIndex;
9497  }
9498 
9499  // Found non-null allocation.
9500  if(nextAlloc2ndIndex < suballoc2ndCount)
9501  {
9502  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9503 
9504  // 1. Process free space before this allocation.
9505  if(lastOffset < suballoc.offset)
9506  {
9507  // There is free space from lastOffset to suballoc.offset.
9508  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9509  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9510  }
9511 
9512  // 2. Process this allocation.
9513  // There is allocation with suballoc.offset, suballoc.size.
9514  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9515 
9516  // 3. Prepare for next iteration.
9517  lastOffset = suballoc.offset + suballoc.size;
9518  ++nextAlloc2ndIndex;
9519  }
9520  // We are at the end.
9521  else
9522  {
9523  if(lastOffset < freeSpace2ndTo1stEnd)
9524  {
9525  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9526  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9527  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9528  }
9529 
9530  // End of loop.
9531  lastOffset = freeSpace2ndTo1stEnd;
9532  }
9533  }
9534  }
9535 
9536  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9537  while(lastOffset < freeSpace1stTo2ndEnd)
9538  {
9539  // Find next non-null allocation or move nextAllocIndex to the end.
9540  while(nextAlloc1stIndex < suballoc1stCount &&
9541  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9542  {
9543  ++nextAlloc1stIndex;
9544  }
9545 
9546  // Found non-null allocation.
9547  if(nextAlloc1stIndex < suballoc1stCount)
9548  {
9549  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9550 
9551  // 1. Process free space before this allocation.
9552  if(lastOffset < suballoc.offset)
9553  {
9554  // There is free space from lastOffset to suballoc.offset.
9555  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9556  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9557  }
9558 
9559  // 2. Process this allocation.
9560  // There is allocation with suballoc.offset, suballoc.size.
9561  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9562 
9563  // 3. Prepare for next iteration.
9564  lastOffset = suballoc.offset + suballoc.size;
9565  ++nextAlloc1stIndex;
9566  }
9567  // We are at the end.
9568  else
9569  {
9570  if(lastOffset < freeSpace1stTo2ndEnd)
9571  {
9572  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9573  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9574  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9575  }
9576 
9577  // End of loop.
9578  lastOffset = freeSpace1stTo2ndEnd;
9579  }
9580  }
9581 
9582  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9583  {
9584  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9585  while(lastOffset < size)
9586  {
9587  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9588  while(nextAlloc2ndIndex != SIZE_MAX &&
9589  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9590  {
9591  --nextAlloc2ndIndex;
9592  }
9593 
9594  // Found non-null allocation.
9595  if(nextAlloc2ndIndex != SIZE_MAX)
9596  {
9597  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9598 
9599  // 1. Process free space before this allocation.
9600  if(lastOffset < suballoc.offset)
9601  {
9602  // There is free space from lastOffset to suballoc.offset.
9603  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9604  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9605  }
9606 
9607  // 2. Process this allocation.
9608  // There is allocation with suballoc.offset, suballoc.size.
9609  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9610 
9611  // 3. Prepare for next iteration.
9612  lastOffset = suballoc.offset + suballoc.size;
9613  --nextAlloc2ndIndex;
9614  }
9615  // We are at the end.
9616  else
9617  {
9618  if(lastOffset < size)
9619  {
9620  // There is free space from lastOffset to size.
9621  const VkDeviceSize unusedRangeSize = size - lastOffset;
9622  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9623  }
9624 
9625  // End of loop.
9626  lastOffset = size;
9627  }
9628  }
9629  }
9630 
9631  PrintDetailedMap_End(json);
9632 }
9633 #endif // #if VMA_STATS_STRING_ENABLED
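/*
PrintDetailedMap above feeds the JSON statistics dump. Assuming the public
vmaBuildStatsString() / vmaFreeStatsString() entry points declared earlier
in this header, the typical way to obtain that output is:

\code
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
// ... write statsString to a file or feed it to the VmaDumpVis tool ...
vmaFreeStatsString(allocator, statsString);
\endcode
*/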
9634 
9635 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9636  uint32_t currentFrameIndex,
9637  uint32_t frameInUseCount,
9638  VkDeviceSize bufferImageGranularity,
9639  VkDeviceSize allocSize,
9640  VkDeviceSize allocAlignment,
9641  bool upperAddress,
9642  VmaSuballocationType allocType,
9643  bool canMakeOtherLost,
9644  uint32_t strategy,
9645  VmaAllocationRequest* pAllocationRequest)
9646 {
9647  VMA_ASSERT(allocSize > 0);
9648  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9649  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9650  VMA_HEAVY_ASSERT(Validate());
9651 
9652  const VkDeviceSize size = GetSize();
9653  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9654  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9655 
9656  if(upperAddress)
9657  {
9658  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9659  {
9660  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9661  return false;
9662  }
9663 
9664  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9665  if(allocSize > size)
9666  {
9667  return false;
9668  }
9669  VkDeviceSize resultBaseOffset = size - allocSize;
9670  if(!suballocations2nd.empty())
9671  {
9672  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9673  resultBaseOffset = lastSuballoc.offset - allocSize;
9674  if(allocSize > lastSuballoc.offset)
9675  {
9676  return false;
9677  }
9678  }
9679 
9680  // Start from offset equal to end of free space.
9681  VkDeviceSize resultOffset = resultBaseOffset;
9682 
9683  // Apply VMA_DEBUG_MARGIN at the end.
9684  if(VMA_DEBUG_MARGIN > 0)
9685  {
9686  if(resultOffset < VMA_DEBUG_MARGIN)
9687  {
9688  return false;
9689  }
9690  resultOffset -= VMA_DEBUG_MARGIN;
9691  }
9692 
9693  // Apply alignment.
9694  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9695 
9696  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9697  // Make bigger alignment if necessary.
9698  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9699  {
9700  bool bufferImageGranularityConflict = false;
9701  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9702  {
9703  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9704  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9705  {
9706  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9707  {
9708  bufferImageGranularityConflict = true;
9709  break;
9710  }
9711  }
9712  else
9713  // Already on previous page.
9714  break;
9715  }
9716  if(bufferImageGranularityConflict)
9717  {
9718  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9719  }
9720  }
9721 
9722  // There is enough free space.
9723  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9724  suballocations1st.back().offset + suballocations1st.back().size :
9725  0;
9726  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9727  {
9728  // Check previous suballocations for BufferImageGranularity conflicts.
9729  // If conflict exists, allocation cannot be made here.
9730  if(bufferImageGranularity > 1)
9731  {
9732  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9733  {
9734  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9735  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9736  {
9737  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9738  {
9739  return false;
9740  }
9741  }
9742  else
9743  {
9744  // Already on next page.
9745  break;
9746  }
9747  }
9748  }
9749 
9750  // All tests passed: Success.
9751  pAllocationRequest->offset = resultOffset;
9752  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9753  pAllocationRequest->sumItemSize = 0;
9754  // pAllocationRequest->item unused.
9755  pAllocationRequest->itemsToMakeLostCount = 0;
9756  return true;
9757  }
9758  }
9759  else // !upperAddress
9760  {
9761  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9762  {
9763  // Try to allocate at the end of 1st vector.
9764 
9765  VkDeviceSize resultBaseOffset = 0;
9766  if(!suballocations1st.empty())
9767  {
9768  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9769  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9770  }
9771 
9772  // Start from offset equal to beginning of free space.
9773  VkDeviceSize resultOffset = resultBaseOffset;
9774 
9775  // Apply VMA_DEBUG_MARGIN at the beginning.
9776  if(VMA_DEBUG_MARGIN > 0)
9777  {
9778  resultOffset += VMA_DEBUG_MARGIN;
9779  }
9780 
9781  // Apply alignment.
9782  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9783 
9784  // Check previous suballocations for BufferImageGranularity conflicts.
9785  // Make bigger alignment if necessary.
9786  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9787  {
9788  bool bufferImageGranularityConflict = false;
9789  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9790  {
9791  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9792  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9793  {
9794  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9795  {
9796  bufferImageGranularityConflict = true;
9797  break;
9798  }
9799  }
9800  else
9801  // Already on previous page.
9802  break;
9803  }
9804  if(bufferImageGranularityConflict)
9805  {
9806  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9807  }
9808  }
9809 
9810  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9811  suballocations2nd.back().offset : size;
9812 
9813  // There is enough free space at the end after alignment.
9814  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9815  {
9816  // Check next suballocations for BufferImageGranularity conflicts.
9817  // If conflict exists, allocation cannot be made here.
9818  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9819  {
9820  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9821  {
9822  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9823  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9824  {
9825  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9826  {
9827  return false;
9828  }
9829  }
9830  else
9831  {
9832  // Already on previous page.
9833  break;
9834  }
9835  }
9836  }
9837 
9838  // All tests passed: Success.
9839  pAllocationRequest->offset = resultOffset;
9840  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9841  pAllocationRequest->sumItemSize = 0;
9842  // pAllocationRequest->item unused.
9843  pAllocationRequest->itemsToMakeLostCount = 0;
9844  return true;
9845  }
9846  }
9847 
9848  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9849  // beginning of 1st vector as the end of free space.
9850  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9851  {
9852  VMA_ASSERT(!suballocations1st.empty());
9853 
9854  VkDeviceSize resultBaseOffset = 0;
9855  if(!suballocations2nd.empty())
9856  {
9857  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9858  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9859  }
9860 
9861  // Start from offset equal to beginning of free space.
9862  VkDeviceSize resultOffset = resultBaseOffset;
9863 
9864  // Apply VMA_DEBUG_MARGIN at the beginning.
9865  if(VMA_DEBUG_MARGIN > 0)
9866  {
9867  resultOffset += VMA_DEBUG_MARGIN;
9868  }
9869 
9870  // Apply alignment.
9871  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9872 
9873  // Check previous suballocations for BufferImageGranularity conflicts.
9874  // Make bigger alignment if necessary.
9875  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9876  {
9877  bool bufferImageGranularityConflict = false;
9878  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9879  {
9880  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9881  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9882  {
9883  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9884  {
9885  bufferImageGranularityConflict = true;
9886  break;
9887  }
9888  }
9889  else
9890  // Already on previous page.
9891  break;
9892  }
9893  if(bufferImageGranularityConflict)
9894  {
9895  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9896  }
9897  }
9898 
9899  pAllocationRequest->itemsToMakeLostCount = 0;
9900  pAllocationRequest->sumItemSize = 0;
9901  size_t index1st = m_1stNullItemsBeginCount;
9902 
9903  if(canMakeOtherLost)
9904  {
9905  while(index1st < suballocations1st.size() &&
9906  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9907  {
9908  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9909  const VmaSuballocation& suballoc = suballocations1st[index1st];
9910  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9911  {
9912  // No problem.
9913  }
9914  else
9915  {
9916  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9917  if(suballoc.hAllocation->CanBecomeLost() &&
9918  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9919  {
9920  ++pAllocationRequest->itemsToMakeLostCount;
9921  pAllocationRequest->sumItemSize += suballoc.size;
9922  }
9923  else
9924  {
9925  return false;
9926  }
9927  }
9928  ++index1st;
9929  }
9930 
9931  // Check next suballocations for BufferImageGranularity conflicts.
9932  // If conflict exists, we must mark more allocations lost or fail.
9933  if(bufferImageGranularity > 1)
9934  {
9935  while(index1st < suballocations1st.size())
9936  {
9937  const VmaSuballocation& suballoc = suballocations1st[index1st];
9938  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9939  {
9940  if(suballoc.hAllocation != VK_NULL_HANDLE)
9941  {
9942  // Not checking the actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type) here: any allocation on the same page is conservatively treated as a conflict.
9943  if(suballoc.hAllocation->CanBecomeLost() &&
9944  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9945  {
9946  ++pAllocationRequest->itemsToMakeLostCount;
9947  pAllocationRequest->sumItemSize += suballoc.size;
9948  }
9949  else
9950  {
9951  return false;
9952  }
9953  }
9954  }
9955  else
9956  {
9957  // Already on next page.
9958  break;
9959  }
9960  ++index1st;
9961  }
9962  }
9963  }
9964 
9965  // There is enough free space at the end after alignment.
9966  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9967  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9968  {
9969  // Check next suballocations for BufferImageGranularity conflicts.
9970  // If conflict exists, allocation cannot be made here.
9971  if(bufferImageGranularity > 1)
9972  {
9973  for(size_t nextSuballocIndex = index1st;
9974  nextSuballocIndex < suballocations1st.size();
9975  nextSuballocIndex++)
9976  {
9977  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9978  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9979  {
9980  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9981  {
9982  return false;
9983  }
9984  }
9985  else
9986  {
9987  // Already on next page.
9988  break;
9989  }
9990  }
9991  }
9992 
9993  // All tests passed: Success.
9994  pAllocationRequest->offset = resultOffset;
9995  pAllocationRequest->sumFreeSize =
9996  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9997  - resultBaseOffset
9998  - pAllocationRequest->sumItemSize;
9999  // pAllocationRequest->item unused.
10000  return true;
10001  }
10002  }
10003  }
10004 
10005  return false;
10006 }
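/*
The upperAddress branch above is what implements the double stack.
Assuming the public VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT flag declared
earlier in this header, a caller opts in per allocation when the pool uses
the linear algorithm:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = linearPool; // created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
// The request is carved downward from the end of the block
// (suballocations2nd), while default allocations keep growing upward
// from offset 0 (suballocations1st).
\endcode

Mixing upper-address requests with ring-buffer use of the same block is
rejected up front by the VMA_ASSERT at the top of the function.
*/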
10007 
10008 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10009  uint32_t currentFrameIndex,
10010  uint32_t frameInUseCount,
10011  VmaAllocationRequest* pAllocationRequest)
10012 {
10013  if(pAllocationRequest->itemsToMakeLostCount == 0)
10014  {
10015  return true;
10016  }
10017 
10018  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10019 
10020  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10021  size_t index1st = m_1stNullItemsBeginCount;
10022  size_t madeLostCount = 0;
10023  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10024  {
10025  VMA_ASSERT(index1st < suballocations1st.size());
10026  VmaSuballocation& suballoc = suballocations1st[index1st];
10027  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10028  {
10029  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10030  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10031  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10032  {
10033  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10034  suballoc.hAllocation = VK_NULL_HANDLE;
10035  m_SumFreeSize += suballoc.size;
10036  ++m_1stNullItemsMiddleCount;
10037  ++madeLostCount;
10038  }
10039  else
10040  {
10041  return false;
10042  }
10043  }
10044  ++index1st;
10045  }
10046 
10047  CleanupAfterFree();
10048  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10049 
10050  return true;
10051 }
10052 
10053 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10054 {
10055  uint32_t lostAllocationCount = 0;
10056 
10057  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10058  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10059  {
10060  VmaSuballocation& suballoc = suballocations1st[i];
10061  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10062  suballoc.hAllocation->CanBecomeLost() &&
10063  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10064  {
10065  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10066  suballoc.hAllocation = VK_NULL_HANDLE;
10067  ++m_1stNullItemsMiddleCount;
10068  m_SumFreeSize += suballoc.size;
10069  ++lostAllocationCount;
10070  }
10071  }
10072 
10073  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10074  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10075  {
10076  VmaSuballocation& suballoc = suballocations2nd[i];
10077  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10078  suballoc.hAllocation->CanBecomeLost() &&
10079  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10080  {
10081  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10082  suballoc.hAllocation = VK_NULL_HANDLE;
10083  ++m_2ndNullItemsCount;
10084  ++lostAllocationCount;
10085  }
10086  }
10087 
10088  if(lostAllocationCount)
10089  {
10090  CleanupAfterFree();
10091  }
10092 
10093  return lostAllocationCount;
10094 }
10095 
10096 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10097 {
10098  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10099  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10100  {
10101  const VmaSuballocation& suballoc = suballocations1st[i];
10102  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10103  {
10104  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10105  {
10106  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10107  return VK_ERROR_VALIDATION_FAILED_EXT;
10108  }
10109  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10110  {
10111  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10112  return VK_ERROR_VALIDATION_FAILED_EXT;
10113  }
10114  }
10115  }
10116 
10117  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10118  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10119  {
10120  const VmaSuballocation& suballoc = suballocations2nd[i];
10121  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10122  {
10123  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10124  {
10125  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10126  return VK_ERROR_VALIDATION_FAILED_EXT;
10127  }
10128  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10129  {
10130  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10131  return VK_ERROR_VALIDATION_FAILED_EXT;
10132  }
10133  }
10134  }
10135 
10136  return VK_SUCCESS;
10137 }
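// Illustrative sketch (not part of the library): the check above relies on a
// fixed 32-bit magic pattern written into the VMA_DEBUG_MARGIN bytes on both
// sides of every allocation. The helper below models the idea in isolation;
// its name and byte pattern are hypothetical stand-ins for VmaValidateMagicValue
// and its real constant.
static bool ExampleMarginIsIntact(const uint8_t* pBlockData, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    static const uint8_t EXAMPLE_MAGIC[4] = { 0x7F, 0x84, 0xE6, 0x66 }; // assumed pattern
    for(size_t i = 0; i < 4; ++i)
    {
        // A buffer underrun overwrites the bytes just before the allocation...
        if(pBlockData[allocOffset - 4 + i] != EXAMPLE_MAGIC[i])
            return false;
        // ...and an overrun overwrites the bytes just after it.
        if(pBlockData[allocOffset + allocSize + i] != EXAMPLE_MAGIC[i])
            return false;
    }
    return true;
}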
10138 
10139 void VmaBlockMetadata_Linear::Alloc(
10140  const VmaAllocationRequest& request,
10141  VmaSuballocationType type,
10142  VkDeviceSize allocSize,
10143  bool upperAddress,
10144  VmaAllocation hAllocation)
10145 {
10146  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10147 
10148  if(upperAddress)
10149  {
10150  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10151  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10152  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10153  suballocations2nd.push_back(newSuballoc);
10154  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10155  }
10156  else
10157  {
10158  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10159 
10160  // First allocation.
10161  if(suballocations1st.empty())
10162  {
10163  suballocations1st.push_back(newSuballoc);
10164  }
10165  else
10166  {
10167  // New allocation at the end of 1st vector.
10168  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
10169  {
10170  // Check if it fits before the end of the block.
10171  VMA_ASSERT(request.offset + allocSize <= GetSize());
10172  suballocations1st.push_back(newSuballoc);
10173  }
10174  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10175  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10176  {
10177  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10178 
10179  switch(m_2ndVectorMode)
10180  {
10181  case SECOND_VECTOR_EMPTY:
10182  // First allocation from second part ring buffer.
10183  VMA_ASSERT(suballocations2nd.empty());
10184  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10185  break;
10186  case SECOND_VECTOR_RING_BUFFER:
10187  // 2-part ring buffer is already started.
10188  VMA_ASSERT(!suballocations2nd.empty());
10189  break;
10190  case SECOND_VECTOR_DOUBLE_STACK:
10191  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10192  break;
10193  default:
10194  VMA_ASSERT(0);
10195  }
10196 
10197  suballocations2nd.push_back(newSuballoc);
10198  }
10199  else
10200  {
10201  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10202  }
10203  }
10204  }
10205 
10206  m_SumFreeSize -= newSuballoc.size;
10207 }
10208 
10209 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10210 {
10211  FreeAtOffset(allocation->GetOffset());
10212 }
10213 
10214 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10215 {
10216  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10217  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10218 
10219  if(!suballocations1st.empty())
10220  {
10221  // First allocation: Mark it as next empty at the beginning.
10222  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10223  if(firstSuballoc.offset == offset)
10224  {
10225  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10226  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10227  m_SumFreeSize += firstSuballoc.size;
10228  ++m_1stNullItemsBeginCount;
10229  CleanupAfterFree();
10230  return;
10231  }
10232  }
10233 
10234  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10235  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10236  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10237  {
10238  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10239  if(lastSuballoc.offset == offset)
10240  {
10241  m_SumFreeSize += lastSuballoc.size;
10242  suballocations2nd.pop_back();
10243  CleanupAfterFree();
10244  return;
10245  }
10246  }
10247  // Last allocation in 1st vector.
10248  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10249  {
10250  VmaSuballocation& lastSuballoc = suballocations1st.back();
10251  if(lastSuballoc.offset == offset)
10252  {
10253  m_SumFreeSize += lastSuballoc.size;
10254  suballocations1st.pop_back();
10255  CleanupAfterFree();
10256  return;
10257  }
10258  }
10259 
10260  // Item from the middle of 1st vector.
10261  {
10262  VmaSuballocation refSuballoc;
10263  refSuballoc.offset = offset;
10264  // Rest of members stays uninitialized intentionally for better performance.
10265  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10266  suballocations1st.begin() + m_1stNullItemsBeginCount,
10267  suballocations1st.end(),
10268  refSuballoc);
10269  if(it != suballocations1st.end())
10270  {
10271  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10272  it->hAllocation = VK_NULL_HANDLE;
10273  ++m_1stNullItemsMiddleCount;
10274  m_SumFreeSize += it->size;
10275  CleanupAfterFree();
10276  return;
10277  }
10278  }
10279 
10280  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10281  {
10282  // Item from the middle of 2nd vector.
10283  VmaSuballocation refSuballoc;
10284  refSuballoc.offset = offset;
10285  // Rest of members stays uninitialized intentionally for better performance.
10286  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10287  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10288  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10289  if(it != suballocations2nd.end())
10290  {
10291  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10292  it->hAllocation = VK_NULL_HANDLE;
10293  ++m_2ndNullItemsCount;
10294  m_SumFreeSize += it->size;
10295  CleanupAfterFree();
10296  return;
10297  }
10298  }
10299 
10300  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10301 }
10302 
10303 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10304 {
10305  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10306  const size_t suballocCount = AccessSuballocations1st().size();
10307  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10308 }
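// Worked example of the heuristic above: with suballocCount = 40 and
// nullItemCount = 25 the vector holds 15 live items, and since 25*2 = 50 >=
// 15*3 = 45, compaction runs. With nullItemCount = 20 it does not (40 < 60).
// The suballocCount > 32 guard keeps small vectors from being compacted over
// and over for negligible gain.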
10309 
10310 void VmaBlockMetadata_Linear::CleanupAfterFree()
10311 {
10312  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10313  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10314 
10315  if(IsEmpty())
10316  {
10317  suballocations1st.clear();
10318  suballocations2nd.clear();
10319  m_1stNullItemsBeginCount = 0;
10320  m_1stNullItemsMiddleCount = 0;
10321  m_2ndNullItemsCount = 0;
10322  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10323  }
10324  else
10325  {
10326  const size_t suballoc1stCount = suballocations1st.size();
10327  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10328  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10329 
10330  // Find more null items at the beginning of 1st vector.
10331  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10332  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10333  {
10334  ++m_1stNullItemsBeginCount;
10335  --m_1stNullItemsMiddleCount;
10336  }
10337 
10338  // Find more null items at the end of 1st vector.
10339  while(m_1stNullItemsMiddleCount > 0 &&
10340  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10341  {
10342  --m_1stNullItemsMiddleCount;
10343  suballocations1st.pop_back();
10344  }
10345 
10346  // Find more null items at the end of 2nd vector.
10347  while(m_2ndNullItemsCount > 0 &&
10348  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10349  {
10350  --m_2ndNullItemsCount;
10351  suballocations2nd.pop_back();
10352  }
10353 
10354  if(ShouldCompact1st())
10355  {
10356  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10357  size_t srcIndex = m_1stNullItemsBeginCount;
10358  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10359  {
10360  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10361  {
10362  ++srcIndex;
10363  }
10364  if(dstIndex != srcIndex)
10365  {
10366  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10367  }
10368  ++srcIndex;
10369  }
10370  suballocations1st.resize(nonNullItemCount);
10371  m_1stNullItemsBeginCount = 0;
10372  m_1stNullItemsMiddleCount = 0;
10373  }
10374 
10375  // 2nd vector became empty.
10376  if(suballocations2nd.empty())
10377  {
10378  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10379  }
10380 
10381  // 1st vector became empty.
10382  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10383  {
10384  suballocations1st.clear();
10385  m_1stNullItemsBeginCount = 0;
10386 
10387  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10388  {
10389  // Swap 1st with 2nd. Now 2nd is empty.
10390  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10391  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10392  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10393  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10394  {
10395  ++m_1stNullItemsBeginCount;
10396  --m_1stNullItemsMiddleCount;
10397  }
10398  m_2ndNullItemsCount = 0;
10399  m_1stVectorIndex ^= 1;
10400  }
10401  }
10402  }
10403 
10404  VMA_HEAVY_ASSERT(Validate());
10405 }
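// Illustration of the vector swap at the end of CleanupAfterFree(): when the
// 1st vector drains completely while the 2nd vector still holds ring-buffer
// allocations, the two vectors simply exchange roles via m_1stVectorIndex ^= 1.
// No suballocation moves in memory - only the bookkeeping of which vector is
// treated as "first" changes, so all offsets handed out earlier stay valid.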
10406 
10407 
10408 ////////////////////////////////////////////////////////////////////////////////
10409 // class VmaBlockMetadata_Buddy
10410 
10411 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10412  VmaBlockMetadata(hAllocator),
10413  m_Root(VMA_NULL),
10414  m_AllocationCount(0),
10415  m_FreeCount(1),
10416  m_SumFreeSize(0)
10417 {
10418  memset(m_FreeList, 0, sizeof(m_FreeList));
10419 }
10420 
10421 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10422 {
10423  DeleteNode(m_Root);
10424 }
10425 
10426 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10427 {
10428  VmaBlockMetadata::Init(size);
10429 
10430  m_UsableSize = VmaPrevPow2(size);
10431  m_SumFreeSize = m_UsableSize;
10432 
10433  // Calculate m_LevelCount.
10434  m_LevelCount = 1;
10435  while(m_LevelCount < MAX_LEVELS &&
10436  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10437  {
10438  ++m_LevelCount;
10439  }
10440 
10441  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10442  rootNode->offset = 0;
10443  rootNode->type = Node::TYPE_FREE;
10444  rootNode->parent = VMA_NULL;
10445  rootNode->buddy = VMA_NULL;
10446 
10447  m_Root = rootNode;
10448  AddToFreeListFront(0, rootNode);
10449 }
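// Worked example of the sizing above (illustrative values): Init(size = 300 MiB)
// sets m_UsableSize = VmaPrevPow2(300 MiB) = 256 MiB, so roughly 44 MiB at the
// end of the block cannot be managed by the buddy system; they are reported
// separately as an unused range in the statistics. m_LevelCount then grows
// until MAX_LEVELS is reached or LevelToNodeSize(m_LevelCount) would drop below
// MIN_NODE_SIZE - each level halving the node size: 256 MiB, 128 MiB, 64 MiB,
// and so on.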
10450 
10451 bool VmaBlockMetadata_Buddy::Validate() const
10452 {
10453  // Validate tree.
10454  ValidationContext ctx;
10455  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10456  {
10457  VMA_VALIDATE(false && "ValidateNode failed.");
10458  }
10459  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10460  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10461 
10462  // Validate free node lists.
10463  for(uint32_t level = 0; level < m_LevelCount; ++level)
10464  {
10465  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10466  m_FreeList[level].front->free.prev == VMA_NULL);
10467 
10468  for(Node* node = m_FreeList[level].front;
10469  node != VMA_NULL;
10470  node = node->free.next)
10471  {
10472  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10473 
10474  if(node->free.next == VMA_NULL)
10475  {
10476  VMA_VALIDATE(m_FreeList[level].back == node);
10477  }
10478  else
10479  {
10480  VMA_VALIDATE(node->free.next->free.prev == node);
10481  }
10482  }
10483  }
10484 
10485  // Validate that free lists at higher levels are empty.
10486  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10487  {
10488  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10489  }
10490 
10491  return true;
10492 }
10493 
10494 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10495 {
10496  for(uint32_t level = 0; level < m_LevelCount; ++level)
10497  {
10498  if(m_FreeList[level].front != VMA_NULL)
10499  {
10500  return LevelToNodeSize(level);
10501  }
10502  }
10503  return 0;
10504 }
10505 
10506 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10507 {
10508  const VkDeviceSize unusableSize = GetUnusableSize();
10509 
10510  outInfo.blockCount = 1;
10511 
10512  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10513  outInfo.usedBytes = outInfo.unusedBytes = 0;
10514 
10515  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10516  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10517  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10518 
10519  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10520 
10521  if(unusableSize > 0)
10522  {
10523  ++outInfo.unusedRangeCount;
10524  outInfo.unusedBytes += unusableSize;
10525  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10526  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10527  }
10528 }
10529 
10530 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10531 {
10532  const VkDeviceSize unusableSize = GetUnusableSize();
10533 
10534  inoutStats.size += GetSize();
10535  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10536  inoutStats.allocationCount += m_AllocationCount;
10537  inoutStats.unusedRangeCount += m_FreeCount;
10538  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10539 
10540  if(unusableSize > 0)
10541  {
10542  ++inoutStats.unusedRangeCount;
10543  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10544  }
10545 }
10546 
10547 #if VMA_STATS_STRING_ENABLED
10548 
10549 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10550 {
10551  // TODO optimize
10552  VmaStatInfo stat;
10553  CalcAllocationStatInfo(stat);
10554 
10555  PrintDetailedMap_Begin(
10556  json,
10557  stat.unusedBytes,
10558  stat.allocationCount,
10559  stat.unusedRangeCount);
10560 
10561  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10562 
10563  const VkDeviceSize unusableSize = GetUnusableSize();
10564  if(unusableSize > 0)
10565  {
10566  PrintDetailedMap_UnusedRange(json,
10567  m_UsableSize, // offset
10568  unusableSize); // size
10569  }
10570 
10571  PrintDetailedMap_End(json);
10572 }
10573 
10574 #endif // #if VMA_STATS_STRING_ENABLED
10575 
10576 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10577  uint32_t currentFrameIndex,
10578  uint32_t frameInUseCount,
10579  VkDeviceSize bufferImageGranularity,
10580  VkDeviceSize allocSize,
10581  VkDeviceSize allocAlignment,
10582  bool upperAddress,
10583  VmaSuballocationType allocType,
10584  bool canMakeOtherLost,
10585  uint32_t strategy,
10586  VmaAllocationRequest* pAllocationRequest)
10587 {
10588  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10589 
10590  // Simple way to respect bufferImageGranularity. May be optimized some day.
10591  // Whenever it might be an OPTIMAL image...
10592  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10593  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10594  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10595  {
10596  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10597  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10598  }
10599 
10600  if(allocSize > m_UsableSize)
10601  {
10602  return false;
10603  }
10604 
10605  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10606  for(uint32_t level = targetLevel + 1; level--; )
10607  {
10608  for(Node* freeNode = m_FreeList[level].front;
10609  freeNode != VMA_NULL;
10610  freeNode = freeNode->free.next)
10611  {
10612  if(freeNode->offset % allocAlignment == 0)
10613  {
10614  pAllocationRequest->offset = freeNode->offset;
10615  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10616  pAllocationRequest->sumItemSize = 0;
10617  pAllocationRequest->itemsToMakeLostCount = 0;
10618  pAllocationRequest->customData = (void*)(uintptr_t)level;
10619  return true;
10620  }
10621  }
10622  }
10623 
10624  return false;
10625 }
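// Example of the search order above: for a 16 KiB request in a 64 KiB usable
// block, targetLevel is the level whose node size is exactly 16 KiB. The loop
// visits levels targetLevel, targetLevel-1, ..., 0, i.e. it prefers an
// exactly-sized free node and only falls back to larger ones, which Alloc()
// will later split down to the requested level. Alignment is satisfied by
// simply skipping free nodes whose offset is not a multiple of allocAlignment.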
10626 
10627 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10628  uint32_t currentFrameIndex,
10629  uint32_t frameInUseCount,
10630  VmaAllocationRequest* pAllocationRequest)
10631 {
10632  /*
10633  Lost allocations are not supported in buddy allocator at the moment.
10634  Support might be added in the future.
10635  */
10636  return pAllocationRequest->itemsToMakeLostCount == 0;
10637 }
10638 
10639 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10640 {
10641  /*
10642  Lost allocations are not supported in buddy allocator at the moment.
10643  Support might be added in the future.
10644  */
10645  return 0;
10646 }
10647 
10648 void VmaBlockMetadata_Buddy::Alloc(
10649  const VmaAllocationRequest& request,
10650  VmaSuballocationType type,
10651  VkDeviceSize allocSize,
10652  bool upperAddress,
10653  VmaAllocation hAllocation)
10654 {
10655  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10656  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10657 
10658  Node* currNode = m_FreeList[currLevel].front;
10659  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10660  while(currNode->offset != request.offset)
10661  {
10662  currNode = currNode->free.next;
10663  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10664  }
10665 
10666  // Go down, splitting free nodes.
10667  while(currLevel < targetLevel)
10668  {
10669  // currNode is already first free node at currLevel.
10670  // Remove it from list of free nodes at this currLevel.
10671  RemoveFromFreeList(currLevel, currNode);
10672 
10673  const uint32_t childrenLevel = currLevel + 1;
10674 
10675  // Create two free sub-nodes.
10676  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10677  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10678 
10679  leftChild->offset = currNode->offset;
10680  leftChild->type = Node::TYPE_FREE;
10681  leftChild->parent = currNode;
10682  leftChild->buddy = rightChild;
10683 
10684  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10685  rightChild->type = Node::TYPE_FREE;
10686  rightChild->parent = currNode;
10687  rightChild->buddy = leftChild;
10688 
10689  // Convert current currNode to split type.
10690  currNode->type = Node::TYPE_SPLIT;
10691  currNode->split.leftChild = leftChild;
10692 
10693  // Add child nodes to free list. Order is important!
10694  AddToFreeListFront(childrenLevel, rightChild);
10695  AddToFreeListFront(childrenLevel, leftChild);
10696 
10697  ++m_FreeCount;
10698  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10699  ++currLevel;
10700  currNode = m_FreeList[currLevel].front;
10701 
10702  /*
10703  We can be sure that currNode, as left child of node previously split,
10704  also fulfills the alignment requirement.
10705  */
10706  }
10707 
10708  // Remove from free list.
10709  VMA_ASSERT(currLevel == targetLevel &&
10710  currNode != VMA_NULL &&
10711  currNode->type == Node::TYPE_FREE);
10712  RemoveFromFreeList(currLevel, currNode);
10713 
10714  // Convert to allocation node.
10715  currNode->type = Node::TYPE_ALLOCATION;
10716  currNode->allocation.alloc = hAllocation;
10717 
10718  ++m_AllocationCount;
10719  --m_FreeCount;
10720  m_SumFreeSize -= allocSize;
10721 }
10722 
10723 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10724 {
10725  if(node->type == Node::TYPE_SPLIT)
10726  {
10727  DeleteNode(node->split.leftChild->buddy);
10728  DeleteNode(node->split.leftChild);
10729  }
10730 
10731  vma_delete(GetAllocationCallbacks(), node);
10732 }
10733 
10734 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10735 {
10736  VMA_VALIDATE(level < m_LevelCount);
10737  VMA_VALIDATE(curr->parent == parent);
10738  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10739  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10740  switch(curr->type)
10741  {
10742  case Node::TYPE_FREE:
10743  // curr->free.prev, next are validated separately.
10744  ctx.calculatedSumFreeSize += levelNodeSize;
10745  ++ctx.calculatedFreeCount;
10746  break;
10747  case Node::TYPE_ALLOCATION:
10748  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10749  ++ctx.calculatedAllocationCount;
10750  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10751  break;
10752  case Node::TYPE_SPLIT:
10753  {
10754  const uint32_t childrenLevel = level + 1;
10755  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10756  const Node* const leftChild = curr->split.leftChild;
10757  VMA_VALIDATE(leftChild != VMA_NULL);
10758  VMA_VALIDATE(leftChild->offset == curr->offset);
10759  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10760  {
10761  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10762  }
10763  const Node* const rightChild = leftChild->buddy;
10764  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10765  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10766  {
10767  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10768  }
10769  }
10770  break;
10771  default:
10772  return false;
10773  }
10774 
10775  return true;
10776 }
10777 
10778 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10779 {
10780  // I know this could be optimized somehow e.g. by using std::bit_width() from C++20.
10781  uint32_t level = 0;
10782  VkDeviceSize currLevelNodeSize = m_UsableSize;
10783  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10784  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10785  {
10786  ++level;
10787  currLevelNodeSize = nextLevelNodeSize;
10788  nextLevelNodeSize = currLevelNodeSize >> 1;
10789  }
10790  return level;
10791 }
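// Illustrative sketch: an equivalent closed form for the loop above, valid
// because m_UsableSize is always a power of 2. ExampleBitWidth is a portable
// stand-in for C++20 std::bit_width; both names are hypothetical and not part
// of the library.
static uint32_t ExampleBitWidth(VkDeviceSize v)
{
    uint32_t width = 0;
    while(v != 0) { ++width; v >>= 1; }
    return width;
}
static uint32_t ExampleAllocSizeToLevel(VkDeviceSize usableSize, VkDeviceSize allocSize, uint32_t levelCount)
{
    const uint32_t rootBits = ExampleBitWidth(usableSize) - 1;   // log2(usableSize), exact for powers of 2
    const uint32_t neededBits = ExampleBitWidth(allocSize - 1);  // ceil(log2(allocSize))
    const uint32_t level = rootBits > neededBits ? rootBits - neededBits : 0;
    // Clamp exactly like the loop's "level + 1 < m_LevelCount" guard.
    return level < levelCount ? level : levelCount - 1;
}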
10792 
10793 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10794 {
10795  // Find node and level.
10796  Node* node = m_Root;
10797  VkDeviceSize nodeOffset = 0;
10798  uint32_t level = 0;
10799  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10800  while(node->type == Node::TYPE_SPLIT)
10801  {
10802  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10803  if(offset < nodeOffset + nextLevelSize)
10804  {
10805  node = node->split.leftChild;
10806  }
10807  else
10808  {
10809  node = node->split.leftChild->buddy;
10810  nodeOffset += nextLevelSize;
10811  }
10812  ++level;
10813  levelNodeSize = nextLevelSize;
10814  }
10815 
10816  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10817  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10818 
10819  ++m_FreeCount;
10820  --m_AllocationCount;
10821  m_SumFreeSize += alloc->GetSize();
10822 
10823  node->type = Node::TYPE_FREE;
10824 
10825  // Join free nodes if possible.
10826  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10827  {
10828  RemoveFromFreeList(level, node->buddy);
10829  Node* const parent = node->parent;
10830 
10831  vma_delete(GetAllocationCallbacks(), node->buddy);
10832  vma_delete(GetAllocationCallbacks(), node);
10833  parent->type = Node::TYPE_FREE;
10834 
10835  node = parent;
10836  --level;
10837  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10838  --m_FreeCount;
10839  }
10840 
10841  AddToFreeListFront(level, node);
10842 }
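// Worked example of the walk and merge above (illustrative numbers): freeing a
// 32 B allocation at offset 96 in a 256 B usable block descends from the root:
// 96 < 0+128 -> left child [0,128); 96 >= 0+64 -> right child [64,128);
// 96 >= 64+32 -> right child [96,128), which is the TYPE_ALLOCATION node.
// If its buddy [64,96) is free, both children are deleted, the parent [64,128)
// becomes free, and the merge repeats one level up - restoring large free
// nodes in at most m_LevelCount steps.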
10843 
10844 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10845 {
10846  switch(node->type)
10847  {
10848  case Node::TYPE_FREE:
10849  ++outInfo.unusedRangeCount;
10850  outInfo.unusedBytes += levelNodeSize;
10851  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10852  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10853  break;
10854  case Node::TYPE_ALLOCATION:
10855  {
10856  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10857  ++outInfo.allocationCount;
10858  outInfo.usedBytes += allocSize;
10859  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10860  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10861 
10862  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10863  if(unusedRangeSize > 0)
10864  {
10865  ++outInfo.unusedRangeCount;
10866  outInfo.unusedBytes += unusedRangeSize;
10867  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10868  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10869  }
10870  }
10871  break;
10872  case Node::TYPE_SPLIT:
10873  {
10874  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10875  const Node* const leftChild = node->split.leftChild;
10876  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10877  const Node* const rightChild = leftChild->buddy;
10878  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10879  }
10880  break;
10881  default:
10882  VMA_ASSERT(0);
10883  }
10884 }
10885 
10886 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10887 {
10888  VMA_ASSERT(node->type == Node::TYPE_FREE);
10889 
10890  // List is empty.
10891  Node* const frontNode = m_FreeList[level].front;
10892  if(frontNode == VMA_NULL)
10893  {
10894  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10895  node->free.prev = node->free.next = VMA_NULL;
10896  m_FreeList[level].front = m_FreeList[level].back = node;
10897  }
10898  else
10899  {
10900  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10901  node->free.prev = VMA_NULL;
10902  node->free.next = frontNode;
10903  frontNode->free.prev = node;
10904  m_FreeList[level].front = node;
10905  }
10906 }
10907 
10908 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10909 {
10910  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10911 
10912  // It is at the front.
10913  if(node->free.prev == VMA_NULL)
10914  {
10915  VMA_ASSERT(m_FreeList[level].front == node);
10916  m_FreeList[level].front = node->free.next;
10917  }
10918  else
10919  {
10920  Node* const prevFreeNode = node->free.prev;
10921  VMA_ASSERT(prevFreeNode->free.next == node);
10922  prevFreeNode->free.next = node->free.next;
10923  }
10924 
10925  // It is at the back.
10926  if(node->free.next == VMA_NULL)
10927  {
10928  VMA_ASSERT(m_FreeList[level].back == node);
10929  m_FreeList[level].back = node->free.prev;
10930  }
10931  else
10932  {
10933  Node* const nextFreeNode = node->free.next;
10934  VMA_ASSERT(nextFreeNode->free.prev == node);
10935  nextFreeNode->free.prev = node->free.prev;
10936  }
10937 }
10938 
10939 #if VMA_STATS_STRING_ENABLED
10940 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10941 {
10942  switch(node->type)
10943  {
10944  case Node::TYPE_FREE:
10945  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10946  break;
10947  case Node::TYPE_ALLOCATION:
10948  {
10949  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10950  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10951  if(allocSize < levelNodeSize)
10952  {
10953  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10954  }
10955  }
10956  break;
10957  case Node::TYPE_SPLIT:
10958  {
10959  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10960  const Node* const leftChild = node->split.leftChild;
10961  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10962  const Node* const rightChild = leftChild->buddy;
10963  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10964  }
10965  break;
10966  default:
10967  VMA_ASSERT(0);
10968  }
10969 }
10970 #endif // #if VMA_STATS_STRING_ENABLED
10971 
10972 
10973 ////////////////////////////////////////////////////////////////////////////////
10974 // class VmaDeviceMemoryBlock
10975 
10976 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10977  m_pMetadata(VMA_NULL),
10978  m_MemoryTypeIndex(UINT32_MAX),
10979  m_Id(0),
10980  m_hMemory(VK_NULL_HANDLE),
10981  m_MapCount(0),
10982  m_pMappedData(VMA_NULL)
10983 {
10984 }
10985 
10986 void VmaDeviceMemoryBlock::Init(
10987  VmaAllocator hAllocator,
10988  uint32_t newMemoryTypeIndex,
10989  VkDeviceMemory newMemory,
10990  VkDeviceSize newSize,
10991  uint32_t id,
10992  uint32_t algorithm)
10993 {
10994  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10995 
10996  m_MemoryTypeIndex = newMemoryTypeIndex;
10997  m_Id = id;
10998  m_hMemory = newMemory;
10999 
11000  switch(algorithm)
11001  {
11002  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11003  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11004  break;
11005  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11006  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11007  break;
11008  default:
11009  VMA_ASSERT(0);
11010  // Fall-through.
11011  case 0:
11012  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11013  }
11014  m_pMetadata->Init(newSize);
11015 }
11016 
11017 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11018 {
11019  // This is the most important assert in the entire library.
11020  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11021  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11022 
11023  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11024  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11025  m_hMemory = VK_NULL_HANDLE;
11026 
11027  vma_delete(allocator, m_pMetadata);
11028  m_pMetadata = VMA_NULL;
11029 }
11030 
11031 bool VmaDeviceMemoryBlock::Validate() const
11032 {
11033  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11034  (m_pMetadata->GetSize() != 0));
11035 
11036  return m_pMetadata->Validate();
11037 }
11038 
11039 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11040 {
11041  void* pData = nullptr;
11042  VkResult res = Map(hAllocator, 1, &pData);
11043  if(res != VK_SUCCESS)
11044  {
11045  return res;
11046  }
11047 
11048  res = m_pMetadata->CheckCorruption(pData);
11049 
11050  Unmap(hAllocator, 1);
11051 
11052  return res;
11053 }
11054 
11055 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11056 {
11057  if(count == 0)
11058  {
11059  return VK_SUCCESS;
11060  }
11061 
11062  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11063  if(m_MapCount != 0)
11064  {
11065  m_MapCount += count;
11066  VMA_ASSERT(m_pMappedData != VMA_NULL);
11067  if(ppData != VMA_NULL)
11068  {
11069  *ppData = m_pMappedData;
11070  }
11071  return VK_SUCCESS;
11072  }
11073  else
11074  {
11075  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11076  hAllocator->m_hDevice,
11077  m_hMemory,
11078  0, // offset
11079  VK_WHOLE_SIZE,
11080  0, // flags
11081  &m_pMappedData);
11082  if(result == VK_SUCCESS)
11083  {
11084  if(ppData != VMA_NULL)
11085  {
11086  *ppData = m_pMappedData;
11087  }
11088  m_MapCount = count;
11089  }
11090  return result;
11091  }
11092 }
11093 
11094 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11095 {
11096  if(count == 0)
11097  {
11098  return;
11099  }
11100 
11101  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11102  if(m_MapCount >= count)
11103  {
11104  m_MapCount -= count;
11105  if(m_MapCount == 0)
11106  {
11107  m_pMappedData = VMA_NULL;
11108  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11109  }
11110  }
11111  else
11112  {
11113  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11114  }
11115 }
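// Usage sketch of the reference-counted mapping above (the calls below are
// shown only to illustrate the contract on internal objects): vkMapMemory is
// issued only on the 0 -> n transition and vkUnmapMemory only on n -> 0, so
// nested users transparently share one mapped pointer.
//
// void* p1 = VMA_NULL;
// void* p2 = VMA_NULL;
// block.Map(hAllocator, 1, &p1);  // calls vkMapMemory, m_MapCount == 1
// block.Map(hAllocator, 1, &p2);  // reuses the mapping, m_MapCount == 2, p2 == p1
// block.Unmap(hAllocator, 1);     // m_MapCount == 1, memory stays mapped
// block.Unmap(hAllocator, 1);     // m_MapCount == 0, calls vkUnmapMemory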
11116 
11117 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11118 {
11119  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11120  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11121 
11122  void* pData;
11123  VkResult res = Map(hAllocator, 1, &pData);
11124  if(res != VK_SUCCESS)
11125  {
11126  return res;
11127  }
11128 
11129  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11130  VmaWriteMagicValue(pData, allocOffset + allocSize);
11131 
11132  Unmap(hAllocator, 1);
11133 
11134  return VK_SUCCESS;
11135 }
11136 
11137 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11138 {
11139  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11140  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11141 
11142  void* pData;
11143  VkResult res = Map(hAllocator, 1, &pData);
11144  if(res != VK_SUCCESS)
11145  {
11146  return res;
11147  }
11148 
11149  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11150  {
11151  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11152  }
11153  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11154  {
11155  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11156  }
11157 
11158  Unmap(hAllocator, 1);
11159 
11160  return VK_SUCCESS;
11161 }
11162 
11163 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11164  const VmaAllocator hAllocator,
11165  const VmaAllocation hAllocation,
11166  VkBuffer hBuffer)
11167 {
11168  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11169  hAllocation->GetBlock() == this);
11170  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11171  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11172  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11173  hAllocator->m_hDevice,
11174  hBuffer,
11175  m_hMemory,
11176  hAllocation->GetOffset());
11177 }
11178 
11179 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11180  const VmaAllocator hAllocator,
11181  const VmaAllocation hAllocation,
11182  VkImage hImage)
11183 {
11184  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11185  hAllocation->GetBlock() == this);
11186  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11187  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11188  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11189  hAllocator->m_hDevice,
11190  hImage,
11191  m_hMemory,
11192  hAllocation->GetOffset());
11193 }
11194 
11195 static void InitStatInfo(VmaStatInfo& outInfo)
11196 {
11197  memset(&outInfo, 0, sizeof(outInfo));
11198  outInfo.allocationSizeMin = UINT64_MAX;
11199  outInfo.unusedRangeSizeMin = UINT64_MAX;
11200 }
11201 
11202 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11203 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11204 {
11205  inoutInfo.blockCount += srcInfo.blockCount;
11206  inoutInfo.allocationCount += srcInfo.allocationCount;
11207  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11208  inoutInfo.usedBytes += srcInfo.usedBytes;
11209  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11210  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11211  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11212  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11213  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11214 }
11215 
11216 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11217 {
11218  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11219  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11220  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11221  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11222 }
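// Example of the rounding above: usedBytes = 10485760 (10 MiB) spread over
// allocationCount = 3 yields allocationSizeAvg = VmaRoundDiv(10485760, 3) =
// 3495253, i.e. rounded to nearest rather than truncated; zero counts leave
// the averages at 0 instead of dividing by zero.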
11223 
11224 VmaPool_T::VmaPool_T(
11225  VmaAllocator hAllocator,
11226  const VmaPoolCreateInfo& createInfo,
11227  VkDeviceSize preferredBlockSize) :
11228  m_BlockVector(
11229  hAllocator,
11230  createInfo.memoryTypeIndex,
11231  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11232  createInfo.minBlockCount,
11233  createInfo.maxBlockCount,
11234  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11235  createInfo.frameInUseCount,
11236  true, // isCustomPool
11237  createInfo.blockSize != 0, // explicitBlockSize
11238  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11239  m_Id(0)
11240 {
11241 }
11242 
11243 VmaPool_T::~VmaPool_T()
11244 {
11245 }
11246 
11247 #if VMA_STATS_STRING_ENABLED
11248 
11249 #endif // #if VMA_STATS_STRING_ENABLED
11250 
11251 VmaBlockVector::VmaBlockVector(
11252  VmaAllocator hAllocator,
11253  uint32_t memoryTypeIndex,
11254  VkDeviceSize preferredBlockSize,
11255  size_t minBlockCount,
11256  size_t maxBlockCount,
11257  VkDeviceSize bufferImageGranularity,
11258  uint32_t frameInUseCount,
11259  bool isCustomPool,
11260  bool explicitBlockSize,
11261  uint32_t algorithm) :
11262  m_hAllocator(hAllocator),
11263  m_MemoryTypeIndex(memoryTypeIndex),
11264  m_PreferredBlockSize(preferredBlockSize),
11265  m_MinBlockCount(minBlockCount),
11266  m_MaxBlockCount(maxBlockCount),
11267  m_BufferImageGranularity(bufferImageGranularity),
11268  m_FrameInUseCount(frameInUseCount),
11269  m_IsCustomPool(isCustomPool),
11270  m_ExplicitBlockSize(explicitBlockSize),
11271  m_Algorithm(algorithm),
11272  m_HasEmptyBlock(false),
11273  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11274  m_NextBlockId(0)
11275 {
11276 }
11277 
11278 VmaBlockVector::~VmaBlockVector()
11279 {
11280  for(size_t i = m_Blocks.size(); i--; )
11281  {
11282  m_Blocks[i]->Destroy(m_hAllocator);
11283  vma_delete(m_hAllocator, m_Blocks[i]);
11284  }
11285 }
11286 
11287 VkResult VmaBlockVector::CreateMinBlocks()
11288 {
11289  for(size_t i = 0; i < m_MinBlockCount; ++i)
11290  {
11291  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11292  if(res != VK_SUCCESS)
11293  {
11294  return res;
11295  }
11296  }
11297  return VK_SUCCESS;
11298 }
11299 
11300 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11301 {
11302  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11303 
11304  const size_t blockCount = m_Blocks.size();
11305 
11306  pStats->size = 0;
11307  pStats->unusedSize = 0;
11308  pStats->allocationCount = 0;
11309  pStats->unusedRangeCount = 0;
11310  pStats->unusedRangeSizeMax = 0;
11311  pStats->blockCount = blockCount;
11312 
11313  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11314  {
11315  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11316  VMA_ASSERT(pBlock);
11317  VMA_HEAVY_ASSERT(pBlock->Validate());
11318  pBlock->m_pMetadata->AddPoolStats(*pStats);
11319  }
11320 }
11321 
11322 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11323 {
11324  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11325  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11326  (VMA_DEBUG_MARGIN > 0) &&
11327  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11328 }
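// Example: for a memory type with HOST_VISIBLE | HOST_COHERENT property flags,
// this returns true only when the library is compiled with
// VMA_DEBUG_DETECT_CORRUPTION enabled and a nonzero VMA_DEBUG_MARGIN. For a
// DEVICE_LOCAL-only memory type it always returns false, because the margins
// around allocations could not be mapped and inspected from the CPU anyway.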
11329 
11330 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11331 
11332 VkResult VmaBlockVector::Allocate(
11333  VmaPool hCurrentPool,
11334  uint32_t currentFrameIndex,
11335  VkDeviceSize size,
11336  VkDeviceSize alignment,
11337  const VmaAllocationCreateInfo& createInfo,
11338  VmaSuballocationType suballocType,
11339  size_t allocationCount,
11340  VmaAllocation* pAllocations)
11341 {
11342  size_t allocIndex;
11343  VkResult res = VK_SUCCESS;
11344 
11345  {
11346  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11347  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11348  {
11349  res = AllocatePage(
11350  hCurrentPool,
11351  currentFrameIndex,
11352  size,
11353  alignment,
11354  createInfo,
11355  suballocType,
11356  pAllocations + allocIndex);
11357  if(res != VK_SUCCESS)
11358  {
11359  break;
11360  }
11361  }
11362  }
11363 
11364  if(res != VK_SUCCESS)
11365  {
11366  // Free all already created allocations.
11367  while(allocIndex--)
11368  {
11369  Free(pAllocations[allocIndex]);
11370  }
11371  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11372  }
11373 
11374  return res;
11375 }
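// Sketch of the same all-or-nothing idiom in isolation (AllocateOne/FreeOne
// are hypothetical stand-ins for AllocatePage/Free): on the first failure,
// every page that already succeeded is released in reverse order and the
// output array is zeroed, so callers never observe a partial result.
//
// size_t i = 0;
// VkResult res = VK_SUCCESS;
// for(; i < count; ++i)
//     if((res = AllocateOne(&items[i])) != VK_SUCCESS)
//         break;
// if(res != VK_SUCCESS)
// {
//     while(i--)              // unwind only what succeeded
//         FreeOne(items[i]);
//     memset(items, 0, sizeof(items[0]) * count);
// }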
11376 
11377 VkResult VmaBlockVector::AllocatePage(
11378  VmaPool hCurrentPool,
11379  uint32_t currentFrameIndex,
11380  VkDeviceSize size,
11381  VkDeviceSize alignment,
11382  const VmaAllocationCreateInfo& createInfo,
11383  VmaSuballocationType suballocType,
11384  VmaAllocation* pAllocation)
11385 {
11386  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11387  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11388  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11389  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11390  const bool canCreateNewBlock =
11391  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11392  (m_Blocks.size() < m_MaxBlockCount);
11393  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11394 
11395  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer,
11396  // which in turn is available only when maxBlockCount = 1.
11397  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11398  {
11399  canMakeOtherLost = false;
11400  }
11401 
11402  // Upper address can only be used with linear allocator and within single memory block.
11403  if(isUpperAddress &&
11404  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11405  {
11406  return VK_ERROR_FEATURE_NOT_PRESENT;
11407  }
11408 
11409  // Validate strategy.
11410  switch(strategy)
11411  {
11412  case 0:
11413  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11414  break;
11415  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11416  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11417  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11418  break;
11419  default:
11420  return VK_ERROR_FEATURE_NOT_PRESENT;
11421  }
11422 
11423  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11424  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11425  {
11426  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11427  }
11428 
11429  /*
11430  Under certain conditions, this whole section can be skipped for optimization, so
11431  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11432  e.g. for custom pools with linear algorithm.
11433  */
11434  if(!canMakeOtherLost || canCreateNewBlock)
11435  {
11436  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11437  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11438  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11439 
11440  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11441  {
11442  // Use only last block.
11443  if(!m_Blocks.empty())
11444  {
11445  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11446  VMA_ASSERT(pCurrBlock);
11447  VkResult res = AllocateFromBlock(
11448  pCurrBlock,
11449  hCurrentPool,
11450  currentFrameIndex,
11451  size,
11452  alignment,
11453  allocFlagsCopy,
11454  createInfo.pUserData,
11455  suballocType,
11456  strategy,
11457  pAllocation);
11458  if(res == VK_SUCCESS)
11459  {
11460  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11461  return VK_SUCCESS;
11462  }
11463  }
11464  }
11465  else
11466  {
11467  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11468  {
11469  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11470  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11471  {
11472  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11473  VMA_ASSERT(pCurrBlock);
11474  VkResult res = AllocateFromBlock(
11475  pCurrBlock,
11476  hCurrentPool,
11477  currentFrameIndex,
11478  size,
11479  alignment,
11480  allocFlagsCopy,
11481  createInfo.pUserData,
11482  suballocType,
11483  strategy,
11484  pAllocation);
11485  if(res == VK_SUCCESS)
11486  {
11487  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11488  return VK_SUCCESS;
11489  }
11490  }
11491  }
11492  else // WORST_FIT, FIRST_FIT
11493  {
11494  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11495  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11496  {
11497  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11498  VMA_ASSERT(pCurrBlock);
11499  VkResult res = AllocateFromBlock(
11500  pCurrBlock,
11501  hCurrentPool,
11502  currentFrameIndex,
11503  size,
11504  alignment,
11505  allocFlagsCopy,
11506  createInfo.pUserData,
11507  suballocType,
11508  strategy,
11509  pAllocation);
11510  if(res == VK_SUCCESS)
11511  {
11512  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11513  return VK_SUCCESS;
11514  }
11515  }
11516  }
11517  }
11518 
11519  // 2. Try to create new block.
11520  if(canCreateNewBlock)
11521  {
11522  // Calculate optimal size for new block.
11523  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11524  uint32_t newBlockSizeShift = 0;
11525  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11526 
11527  if(!m_ExplicitBlockSize)
11528  {
11529  // Allocate 1/8, 1/4, 1/2 as first blocks.
11530  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11531  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11532  {
11533  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11534  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11535  {
11536  newBlockSize = smallerNewBlockSize;
11537  ++newBlockSizeShift;
11538  }
11539  else
11540  {
11541  break;
11542  }
11543  }
11544  }
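// Example of the progression above: with m_PreferredBlockSize = 256 MiB, no
// existing blocks and a 1 MiB request, the loop halves three times (128, 64,
// 32 MiB - each candidate exceeds maxExistingBlockSize = 0 and is >= 2 MiB),
// so the first block is created at 256/8 = 32 MiB. As blocks accumulate,
// maxExistingBlockSize grows and new blocks step back up toward the preferred
// size.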
11545 
11546  size_t newBlockIndex = 0;
11547  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11548  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11549  if(!m_ExplicitBlockSize)
11550  {
11551  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11552  {
11553  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11554  if(smallerNewBlockSize >= size)
11555  {
11556  newBlockSize = smallerNewBlockSize;
11557  ++newBlockSizeShift;
11558  res = CreateBlock(newBlockSize, &newBlockIndex);
11559  }
11560  else
11561  {
11562  break;
11563  }
11564  }
11565  }
11566 
11567  if(res == VK_SUCCESS)
11568  {
11569  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11570  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11571 
11572  res = AllocateFromBlock(
11573  pBlock,
11574  hCurrentPool,
11575  currentFrameIndex,
11576  size,
11577  alignment,
11578  allocFlagsCopy,
11579  createInfo.pUserData,
11580  suballocType,
11581  strategy,
11582  pAllocation);
11583  if(res == VK_SUCCESS)
11584  {
11585  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11586  return VK_SUCCESS;
11587  }
11588  else
11589  {
11590  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11591  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11592  }
11593  }
11594  }
11595  }
11596 
11597  // 3. Try to allocate from existing blocks with making other allocations lost.
11598  if(canMakeOtherLost)
11599  {
11600  uint32_t tryIndex = 0;
11601  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11602  {
11603  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11604  VmaAllocationRequest bestRequest = {};
11605  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11606 
11607  // 1. Search existing allocations.
11608  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11609  {
11610  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11611  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11612  {
11613  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11614  VMA_ASSERT(pCurrBlock);
11615  VmaAllocationRequest currRequest = {};
11616  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11617  currentFrameIndex,
11618  m_FrameInUseCount,
11619  m_BufferImageGranularity,
11620  size,
11621  alignment,
11622  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11623  suballocType,
11624  canMakeOtherLost,
11625  strategy,
11626  &currRequest))
11627  {
11628  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11629  if(pBestRequestBlock == VMA_NULL ||
11630  currRequestCost < bestRequestCost)
11631  {
11632  pBestRequestBlock = pCurrBlock;
11633  bestRequest = currRequest;
11634  bestRequestCost = currRequestCost;
11635 
11636  if(bestRequestCost == 0)
11637  {
11638  break;
11639  }
11640  }
11641  }
11642  }
11643  }
11644  else // WORST_FIT, FIRST_FIT
11645  {
11646  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11647  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11648  {
11649  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11650  VMA_ASSERT(pCurrBlock);
11651  VmaAllocationRequest currRequest = {};
11652  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11653  currentFrameIndex,
11654  m_FrameInUseCount,
11655  m_BufferImageGranularity,
11656  size,
11657  alignment,
11658  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11659  suballocType,
11660  canMakeOtherLost,
11661  strategy,
11662  &currRequest))
11663  {
11664  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11665  if(pBestRequestBlock == VMA_NULL ||
11666  currRequestCost < bestRequestCost ||
11667  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11668  {
11669  pBestRequestBlock = pCurrBlock;
11670  bestRequest = currRequest;
11671  bestRequestCost = currRequestCost;
11672 
11673  if(bestRequestCost == 0 ||
11674  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11675  {
11676  break;
11677  }
11678  }
11679  }
11680  }
11681  }
11682 
11683  if(pBestRequestBlock != VMA_NULL)
11684  {
11685  if(mapped)
11686  {
11687  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11688  if(res != VK_SUCCESS)
11689  {
11690  return res;
11691  }
11692  }
11693 
11694  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11695  currentFrameIndex,
11696  m_FrameInUseCount,
11697  &bestRequest))
11698  {
11699  // We no longer have an empty block.
11700  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11701  {
11702  m_HasEmptyBlock = false;
11703  }
11704  // Allocate from this pBlock.
11705  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11706  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11707  (*pAllocation)->InitBlockAllocation(
11708  hCurrentPool,
11709  pBestRequestBlock,
11710  bestRequest.offset,
11711  alignment,
11712  size,
11713  suballocType,
11714  mapped,
11715  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11716  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11717  VMA_DEBUG_LOG(" Returned from existing block");
11718  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11719  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11720  {
11721  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11722  }
11723  if(IsCorruptionDetectionEnabled())
11724  {
11725  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11726  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11727  }
11728  return VK_SUCCESS;
11729  }
11730  // else: Some allocations must have been touched while we are here. Next try.
11731  }
11732  else
11733  {
11734  // Could not find place in any of the blocks - break outer loop.
11735  break;
11736  }
11737  }
11738  /* Maximum number of tries exceeded - a very unlikely event, which can happen
11739  when many other threads are simultaneously touching allocations, making it
11740  impossible to make them lost at the same time as we try to allocate. */
11741  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11742  {
11743  return VK_ERROR_TOO_MANY_OBJECTS;
11744  }
11745  }
11746 
11747  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11748 }
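
// Annotation on the retry loop above: with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT,
// each try first picks the cheapest allocation request (CalcCost() weighs the
// suballocations that would have to be made lost) and only then tries to actually
// make them lost. Another thread may touch one of those allocations between the two
// steps, invalidating the request - hence up to VMA_ALLOCATION_TRY_COUNT retries
// before the function gives up with VK_ERROR_TOO_MANY_OBJECTS.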
11749 
11750 void VmaBlockVector::Free(
11751  VmaAllocation hAllocation)
11752 {
11753  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11754 
11755  // Scope for lock.
11756  {
11757  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11758 
11759  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11760 
11761  if(IsCorruptionDetectionEnabled())
11762  {
11763  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11764  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11765  }
11766 
11767  if(hAllocation->IsPersistentMap())
11768  {
11769  pBlock->Unmap(m_hAllocator, 1);
11770  }
11771 
11772  pBlock->m_pMetadata->Free(hAllocation);
11773  VMA_HEAVY_ASSERT(pBlock->Validate());
11774 
11775  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11776 
11777  // pBlock became empty after this deallocation.
11778  if(pBlock->m_pMetadata->IsEmpty())
11779  {
11780  // Already has empty Allocation. We don't want to have two, so delete this one.
11781  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11782  {
11783  pBlockToDelete = pBlock;
11784  Remove(pBlock);
11785  }
11786  // We now have our first empty block.
11787  else
11788  {
11789  m_HasEmptyBlock = true;
11790  }
11791  }
11792  // pBlock didn't become empty, but we have another empty block - find and free that one.
11793  // (This is optional, heuristics.)
11794  else if(m_HasEmptyBlock)
11795  {
11796  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11797  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11798  {
11799  pBlockToDelete = pLastBlock;
11800  m_Blocks.pop_back();
11801  m_HasEmptyBlock = false;
11802  }
11803  }
11804 
11805  IncrementallySortBlocks();
11806  }
11807 
11808  // Destruction of a free Allocation. Deferred until this point, outside of the
11809  // mutex lock, for performance reasons.
11810  if(pBlockToDelete != VMA_NULL)
11811  {
11812  VMA_DEBUG_LOG(" Deleted empty allocation");
11813  pBlockToDelete->Destroy(m_hAllocator);
11814  vma_delete(m_hAllocator, pBlockToDelete);
11815  }
11816 }
11817 
11818 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11819 {
11820  VkDeviceSize result = 0;
11821  for(size_t i = m_Blocks.size(); i--; )
11822  {
11823  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11824  if(result >= m_PreferredBlockSize)
11825  {
11826  break;
11827  }
11828  }
11829  return result;
11830 }
11831 
11832 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11833 {
11834  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11835  {
11836  if(m_Blocks[blockIndex] == pBlock)
11837  {
11838  VmaVectorRemove(m_Blocks, blockIndex);
11839  return;
11840  }
11841  }
11842  VMA_ASSERT(0);
11843 }
11844 
11845 void VmaBlockVector::IncrementallySortBlocks()
11846 {
11847  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11848  {
11849  // Bubble sort only until first swap.
11850  for(size_t i = 1; i < m_Blocks.size(); ++i)
11851  {
11852  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11853  {
11854  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11855  return;
11856  }
11857  }
11858  }
11859 }
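
// Note: at most one swap happens per call, so a single call is O(n) compares and
// the vector only gradually converges toward ascending order of free space. Since
// this is invoked on every Free() (see above), the amortized cost stays low while
// keeping the order "good enough" as an allocation heuristic - an exact sort on
// every free would be wasted work.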
11860 
11861 VkResult VmaBlockVector::AllocateFromBlock(
11862  VmaDeviceMemoryBlock* pBlock,
11863  VmaPool hCurrentPool,
11864  uint32_t currentFrameIndex,
11865  VkDeviceSize size,
11866  VkDeviceSize alignment,
11867  VmaAllocationCreateFlags allocFlags,
11868  void* pUserData,
11869  VmaSuballocationType suballocType,
11870  uint32_t strategy,
11871  VmaAllocation* pAllocation)
11872 {
11873  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11874  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11875  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11876  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11877 
11878  VmaAllocationRequest currRequest = {};
11879  if(pBlock->m_pMetadata->CreateAllocationRequest(
11880  currentFrameIndex,
11881  m_FrameInUseCount,
11882  m_BufferImageGranularity,
11883  size,
11884  alignment,
11885  isUpperAddress,
11886  suballocType,
11887  false, // canMakeOtherLost
11888  strategy,
11889  &currRequest))
11890  {
11891  // Allocate from pBlock.
11892  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11893 
11894  if(mapped)
11895  {
11896  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11897  if(res != VK_SUCCESS)
11898  {
11899  return res;
11900  }
11901  }
11902 
11903  // We no longer have an empty Allocation.
11904  if(pBlock->m_pMetadata->IsEmpty())
11905  {
11906  m_HasEmptyBlock = false;
11907  }
11908 
11909  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11910  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11911  (*pAllocation)->InitBlockAllocation(
11912  hCurrentPool,
11913  pBlock,
11914  currRequest.offset,
11915  alignment,
11916  size,
11917  suballocType,
11918  mapped,
11919  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11920  VMA_HEAVY_ASSERT(pBlock->Validate());
11921  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11922  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11923  {
11924  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11925  }
11926  if(IsCorruptionDetectionEnabled())
11927  {
11928  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11929  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11930  }
11931  return VK_SUCCESS;
11932  }
11933  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11934 }
11935 
11936 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11937 {
11938  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11939  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11940  allocInfo.allocationSize = blockSize;
11941  VkDeviceMemory mem = VK_NULL_HANDLE;
11942  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11943  if(res < 0)
11944  {
11945  return res;
11946  }
11947 
11948  // New VkDeviceMemory successfully created.
11949 
11950  // Create new Allocation for it.
11951  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11952  pBlock->Init(
11953  m_hAllocator,
11954  m_MemoryTypeIndex,
11955  mem,
11956  allocInfo.allocationSize,
11957  m_NextBlockId++,
11958  m_Algorithm);
11959 
11960  m_Blocks.push_back(pBlock);
11961  if(pNewBlockIndex != VMA_NULL)
11962  {
11963  *pNewBlockIndex = m_Blocks.size() - 1;
11964  }
11965 
11966  return VK_SUCCESS;
11967 }
11968 
11969 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11970  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11971  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11972 {
11973  const size_t blockCount = m_Blocks.size();
11974  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11975 
11976  enum BLOCK_FLAG
11977  {
11978  BLOCK_FLAG_USED = 0x00000001,
11979  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11980  };
11981 
11982  struct BlockInfo
11983  {
11984  uint32_t flags;
11985  void* pMappedData;
11986  };
11987  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11988  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11989  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11990 
11991  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11992  const size_t moveCount = moves.size();
11993  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11994  {
11995  const VmaDefragmentationMove& move = moves[moveIndex];
11996  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11997  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11998  }
11999 
12000  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12001 
12002  // Go over all blocks. Get mapped pointer or map if necessary.
12003  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12004  {
12005  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12006  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12007  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12008  {
12009  currBlockInfo.pMappedData = pBlock->GetMappedData();
12010  // It is not originally mapped - map it.
12011  if(currBlockInfo.pMappedData == VMA_NULL)
12012  {
12013  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12014  if(pDefragCtx->res == VK_SUCCESS)
12015  {
12016  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12017  }
12018  }
12019  }
12020  }
12021 
12022  // Go over all moves. Do actual data transfer.
12023  if(pDefragCtx->res == VK_SUCCESS)
12024  {
12025  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12026  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12027 
12028  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12029  {
12030  const VmaDefragmentationMove& move = moves[moveIndex];
12031 
12032  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12033  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12034 
12035  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12036 
12037  // Invalidate source.
12038  if(isNonCoherent)
12039  {
12040  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12041  memRange.memory = pSrcBlock->GetDeviceMemory();
12042  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12043  memRange.size = VMA_MIN(
12044  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12045  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12046  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12047  }
12048 
12049  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12050  memmove(
12051  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12052  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12053  static_cast<size_t>(move.size));
12054 
12055  if(IsCorruptionDetectionEnabled())
12056  {
12057  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12058  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12059  }
12060 
12061  // Flush destination.
12062  if(isNonCoherent)
12063  {
12064  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12065  memRange.memory = pDstBlock->GetDeviceMemory();
12066  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12067  memRange.size = VMA_MIN(
12068  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12069  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12070  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12071  }
12072  }
12073  }
12074 
12075  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12076  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
12077  for(size_t blockIndex = blockCount; blockIndex--; )
12078  {
12079  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12080  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12081  {
12082  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12083  pBlock->Unmap(m_hAllocator, 1);
12084  }
12085  }
12086 }
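
// Worked example of the nonCoherentAtomSize clamping used for both the invalidate
// and the flush above (the numbers are hypothetical): with nonCoherentAtomSize = 64,
// move.srcOffset = 100 and move.size = 40:
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VmaAlignUp(40 + (100 - 64), 64) = VmaAlignUp(76, 64) = 128
// (then clamped to blockSize - memRange.offset). The resulting range covers the
// whole moved region while satisfying the alignment rules that
// vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges impose on non-coherent memory.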
12087 
12088 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12089  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12090  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12091  VkCommandBuffer commandBuffer)
12092 {
12093  const size_t blockCount = m_Blocks.size();
12094 
12095  pDefragCtx->blockContexts.resize(blockCount);
12096  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12097 
12098  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12099  const size_t moveCount = moves.size();
12100  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12101  {
12102  const VmaDefragmentationMove& move = moves[moveIndex];
12103  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12104  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12105  }
12106 
12107  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12108 
12109  // Go over all blocks. Create and bind buffer for whole block if necessary.
12110  {
12111  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12112  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12113  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12114 
12115  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12116  {
12117  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12118  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12119  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12120  {
12121  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12122  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12123  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12124  if(pDefragCtx->res == VK_SUCCESS)
12125  {
12126  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12127  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12128  }
12129  }
12130  }
12131  }
12132 
12133  // Go over all moves. Post data transfer commands to command buffer.
12134  if(pDefragCtx->res == VK_SUCCESS)
12135  {
12136  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12137  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12138 
12139  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12140  {
12141  const VmaDefragmentationMove& move = moves[moveIndex];
12142 
12143  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12144  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12145 
12146  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12147 
12148  VkBufferCopy region = {
12149  move.srcOffset,
12150  move.dstOffset,
12151  move.size };
12152  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12153  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12154  }
12155  }
12156 
12157  // Buffers are saved in the defrag context for later destruction. VK_NOT_READY signals that the recorded command buffer still has to be submitted and executed.
12158  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12159  {
12160  pDefragCtx->res = VK_NOT_READY;
12161  }
12162 }
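
// The idea here: one VkBuffer with TRANSFER_SRC | TRANSFER_DST usage is bound to each
// touched VkDeviceMemory block at offset 0, spanning the whole block, so a single
// vkCmdCopyBuffer with VkBufferCopy{srcOffset, dstOffset, size} can move data between
// arbitrary offsets of any two blocks. When source and destination alias the same
// memory, the copy regions must not overlap - which is why this path runs with
// overlappingMoveSupported == false.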
12163 
12164 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12165 {
12166  m_HasEmptyBlock = false;
12167  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12168  {
12169  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12170  if(pBlock->m_pMetadata->IsEmpty())
12171  {
12172  if(m_Blocks.size() > m_MinBlockCount)
12173  {
12174  if(pDefragmentationStats != VMA_NULL)
12175  {
12176  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12177  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12178  }
12179 
12180  VmaVectorRemove(m_Blocks, blockIndex);
12181  pBlock->Destroy(m_hAllocator);
12182  vma_delete(m_hAllocator, pBlock);
12183  }
12184  else
12185  {
12186  m_HasEmptyBlock = true;
12187  }
12188  }
12189  }
12190 }
12191 
12192 #if VMA_STATS_STRING_ENABLED
12193 
12194 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12195 {
12196  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12197 
12198  json.BeginObject();
12199 
12200  if(m_IsCustomPool)
12201  {
12202  json.WriteString("MemoryTypeIndex");
12203  json.WriteNumber(m_MemoryTypeIndex);
12204 
12205  json.WriteString("BlockSize");
12206  json.WriteNumber(m_PreferredBlockSize);
12207 
12208  json.WriteString("BlockCount");
12209  json.BeginObject(true);
12210  if(m_MinBlockCount > 0)
12211  {
12212  json.WriteString("Min");
12213  json.WriteNumber((uint64_t)m_MinBlockCount);
12214  }
12215  if(m_MaxBlockCount < SIZE_MAX)
12216  {
12217  json.WriteString("Max");
12218  json.WriteNumber((uint64_t)m_MaxBlockCount);
12219  }
12220  json.WriteString("Cur");
12221  json.WriteNumber((uint64_t)m_Blocks.size());
12222  json.EndObject();
12223 
12224  if(m_FrameInUseCount > 0)
12225  {
12226  json.WriteString("FrameInUseCount");
12227  json.WriteNumber(m_FrameInUseCount);
12228  }
12229 
12230  if(m_Algorithm != 0)
12231  {
12232  json.WriteString("Algorithm");
12233  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12234  }
12235  }
12236  else
12237  {
12238  json.WriteString("PreferredBlockSize");
12239  json.WriteNumber(m_PreferredBlockSize);
12240  }
12241 
12242  json.WriteString("Blocks");
12243  json.BeginObject();
12244  for(size_t i = 0; i < m_Blocks.size(); ++i)
12245  {
12246  json.BeginString();
12247  json.ContinueString(m_Blocks[i]->GetId());
12248  json.EndString();
12249 
12250  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12251  }
12252  json.EndObject();
12253 
12254  json.EndObject();
12255 }
12256 
12257 #endif // #if VMA_STATS_STRING_ENABLED
12258 
12259 void VmaBlockVector::Defragment(
12260  class VmaBlockVectorDefragmentationContext* pCtx,
12261  VmaDefragmentationStats* pStats,
12262  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12263  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12264  VkCommandBuffer commandBuffer)
12265 {
12266  pCtx->res = VK_SUCCESS;
12267 
12268  const VkMemoryPropertyFlags memPropFlags =
12269  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12270  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12271  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12272 
12273  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12274  isHostVisible;
12275  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12276  (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));
12277 
12278  // There are options to defragment this memory type.
12279  if(canDefragmentOnCpu || canDefragmentOnGpu)
12280  {
12281  bool defragmentOnGpu;
12282  // There is only one option to defragment this memory type.
12283  if(canDefragmentOnGpu != canDefragmentOnCpu)
12284  {
12285  defragmentOnGpu = canDefragmentOnGpu;
12286  }
12287  // Both options are available: Heuristics to choose the best one.
12288  else
12289  {
12290  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12291  m_hAllocator->IsIntegratedGpu();
12292  }
12293 
12294  bool overlappingMoveSupported = !defragmentOnGpu;
12295 
12296  if(m_hAllocator->m_UseMutex)
12297  {
12298  m_Mutex.LockWrite();
12299  pCtx->mutexLocked = true;
12300  }
12301 
12302  pCtx->Begin(overlappingMoveSupported);
12303 
12304  // Defragment.
12305 
12306  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12307  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12308  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12309  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12310  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12311 
12312  // Accumulate statistics.
12313  if(pStats != VMA_NULL)
12314  {
12315  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12316  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12317  pStats->bytesMoved += bytesMoved;
12318  pStats->allocationsMoved += allocationsMoved;
12319  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12320  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12321  if(defragmentOnGpu)
12322  {
12323  maxGpuBytesToMove -= bytesMoved;
12324  maxGpuAllocationsToMove -= allocationsMoved;
12325  }
12326  else
12327  {
12328  maxCpuBytesToMove -= bytesMoved;
12329  maxCpuAllocationsToMove -= allocationsMoved;
12330  }
12331  }
12332 
12333  if(pCtx->res >= VK_SUCCESS)
12334  {
12335  if(defragmentOnGpu)
12336  {
12337  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12338  }
12339  else
12340  {
12341  ApplyDefragmentationMovesCpu(pCtx, moves);
12342  }
12343  }
12344  }
12345 }
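
// Recap of the path choice above: CPU defragmentation needs HOST_VISIBLE memory
// (it memmoves through mapped pointers), while GPU defragmentation needs a command
// buffer and is disallowed when corruption detection is active on HOST_VISIBLE +
// HOST_COHERENT memory, since the magic values around allocations are maintained
// through mapped pointers on the CPU side. When both paths are viable, DEVICE_LOCAL
// memory - or any memory on an integrated GPU - is defragmented on the GPU.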
12346 
12347 void VmaBlockVector::DefragmentationEnd(
12348  class VmaBlockVectorDefragmentationContext* pCtx,
12349  VmaDefragmentationStats* pStats)
12350 {
12351  // Destroy buffers.
12352  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12353  {
12354  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12355  if(blockCtx.hBuffer)
12356  {
12357  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12358  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12359  }
12360  }
12361 
12362  if(pCtx->res >= VK_SUCCESS)
12363  {
12364  FreeEmptyBlocks(pStats);
12365  }
12366 
12367  if(pCtx->mutexLocked)
12368  {
12369  VMA_ASSERT(m_hAllocator->m_UseMutex);
12370  m_Mutex.UnlockWrite();
12371  }
12372 }
12373 
12374 size_t VmaBlockVector::CalcAllocationCount() const
12375 {
12376  size_t result = 0;
12377  for(size_t i = 0; i < m_Blocks.size(); ++i)
12378  {
12379  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12380  }
12381  return result;
12382 }
12383 
12384 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12385 {
12386  if(m_BufferImageGranularity == 1)
12387  {
12388  return false;
12389  }
12390  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12391  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12392  {
12393  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12394  VMA_ASSERT(m_Algorithm == 0);
12395  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12396  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12397  {
12398  return true;
12399  }
12400  }
12401  return false;
12402 }
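
// Background: bufferImageGranularity (a VkPhysicalDeviceLimits value) is the
// page-like granularity at which linear resources (buffers) and optimally tiled
// images may not share memory. For example, with a granularity of 4096, a buffer
// ending at offset 100 and an optimal image starting at offset 2048 would occupy
// the same 4096-byte "page" and conflict; with granularity 1 no such conflict is
// possible, hence the early return above.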
12403 
12404 void VmaBlockVector::MakePoolAllocationsLost(
12405  uint32_t currentFrameIndex,
12406  size_t* pLostAllocationCount)
12407 {
12408  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12409  size_t lostAllocationCount = 0;
12410  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12411  {
12412  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12413  VMA_ASSERT(pBlock);
12414  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12415  }
12416  if(pLostAllocationCount != VMA_NULL)
12417  {
12418  *pLostAllocationCount = lostAllocationCount;
12419  }
12420 }
12421 
12422 VkResult VmaBlockVector::CheckCorruption()
12423 {
12424  if(!IsCorruptionDetectionEnabled())
12425  {
12426  return VK_ERROR_FEATURE_NOT_PRESENT;
12427  }
12428 
12429  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12430  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12431  {
12432  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12433  VMA_ASSERT(pBlock);
12434  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12435  if(res != VK_SUCCESS)
12436  {
12437  return res;
12438  }
12439  }
12440  return VK_SUCCESS;
12441 }
12442 
12443 void VmaBlockVector::AddStats(VmaStats* pStats)
12444 {
12445  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12446  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12447 
12448  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12449 
12450  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12451  {
12452  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12453  VMA_ASSERT(pBlock);
12454  VMA_HEAVY_ASSERT(pBlock->Validate());
12455  VmaStatInfo allocationStatInfo;
12456  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12457  VmaAddStatInfo(pStats->total, allocationStatInfo);
12458  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12459  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12460  }
12461 }
12462 
12463 ////////////////////////////////////////////////////////////////////////////////
12464 // VmaDefragmentationAlgorithm_Generic members definition
12465 
12466 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12467  VmaAllocator hAllocator,
12468  VmaBlockVector* pBlockVector,
12469  uint32_t currentFrameIndex,
12470  bool overlappingMoveSupported) :
12471  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12472  m_AllAllocations(false),
12473  m_AllocationCount(0),
12474  m_BytesMoved(0),
12475  m_AllocationsMoved(0),
12476  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12477 {
12478  // Create block info for each block.
12479  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12480  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12481  {
12482  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12483  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12484  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12485  m_Blocks.push_back(pBlockInfo);
12486  }
12487 
12488  // Sort them by m_pBlock pointer value.
12489  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12490 }
12491 
12492 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12493 {
12494  for(size_t i = m_Blocks.size(); i--; )
12495  {
12496  vma_delete(m_hAllocator, m_Blocks[i]);
12497  }
12498 }
12499 
12500 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12501 {
12502  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
12503  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12504  {
12505  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12506  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12507  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12508  {
12509  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12510  (*it)->m_Allocations.push_back(allocInfo);
12511  }
12512  else
12513  {
12514  VMA_ASSERT(0);
12515  }
12516 
12517  ++m_AllocationCount;
12518  }
12519 }
12520 
12521 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12522  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12523  VkDeviceSize maxBytesToMove,
12524  uint32_t maxAllocationsToMove)
12525 {
12526  if(m_Blocks.empty())
12527  {
12528  return VK_SUCCESS;
12529  }
12530 
12531  // This is a choice based on research.
12532  // Option 1:
12533  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12534  // Option 2:
12535  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12536  // Option 3:
12537  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12538 
12539  size_t srcBlockMinIndex = 0;
12540  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
12541  /*
12542  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12543  {
12544  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12545  if(blocksWithNonMovableCount > 0)
12546  {
12547  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12548  }
12549  }
12550  */
12551 
12552  size_t srcBlockIndex = m_Blocks.size() - 1;
12553  size_t srcAllocIndex = SIZE_MAX;
12554  for(;;)
12555  {
12556  // 1. Find next allocation to move.
12557  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12558  // 1.2. Then start from last to first m_Allocations.
12559  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12560  {
12561  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12562  {
12563  // Finished: no more allocations to process.
12564  if(srcBlockIndex == srcBlockMinIndex)
12565  {
12566  return VK_SUCCESS;
12567  }
12568  else
12569  {
12570  --srcBlockIndex;
12571  srcAllocIndex = SIZE_MAX;
12572  }
12573  }
12574  else
12575  {
12576  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12577  }
12578  }
12579 
12580  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12581  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12582 
12583  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12584  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12585  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12586  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12587 
12588  // 2. Try to find new place for this allocation in preceding or current block.
12589  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12590  {
12591  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12592  VmaAllocationRequest dstAllocRequest;
12593  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12594  m_CurrentFrameIndex,
12595  m_pBlockVector->GetFrameInUseCount(),
12596  m_pBlockVector->GetBufferImageGranularity(),
12597  size,
12598  alignment,
12599  false, // upperAddress
12600  suballocType,
12601  false, // canMakeOtherLost
12602  strategy,
12603  &dstAllocRequest) &&
12604  MoveMakesSense(
12605  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12606  {
12607  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12608 
12609  // Reached limit on number of allocations or bytes to move.
12610  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12611  (m_BytesMoved + size > maxBytesToMove))
12612  {
12613  return VK_SUCCESS;
12614  }
12615 
12616  VmaDefragmentationMove move;
12617  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12618  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12619  move.srcOffset = srcOffset;
12620  move.dstOffset = dstAllocRequest.offset;
12621  move.size = size;
12622  moves.push_back(move);
12623 
12624  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12625  dstAllocRequest,
12626  suballocType,
12627  size,
12628  false, // upperAddress
12629  allocInfo.m_hAllocation);
12630  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12631 
12632  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12633 
12634  if(allocInfo.m_pChanged != VMA_NULL)
12635  {
12636  *allocInfo.m_pChanged = VK_TRUE;
12637  }
12638 
12639  ++m_AllocationsMoved;
12640  m_BytesMoved += size;
12641 
12642  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12643 
12644  break;
12645  }
12646  }
12647 
12648  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12649 
12650  if(srcAllocIndex > 0)
12651  {
12652  --srcAllocIndex;
12653  }
12654  else
12655  {
12656  if(srcBlockIndex > 0)
12657  {
12658  --srcBlockIndex;
12659  srcAllocIndex = SIZE_MAX;
12660  }
12661  else
12662  {
12663  return VK_SUCCESS;
12664  }
12665  }
12666  }
12667 }
12668 
12669 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12670 {
12671  size_t result = 0;
12672  for(size_t i = 0; i < m_Blocks.size(); ++i)
12673  {
12674  if(m_Blocks[i]->m_HasNonMovableAllocations)
12675  {
12676  ++result;
12677  }
12678  }
12679  return result;
12680 }
12681 
12682 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12683  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12684  VkDeviceSize maxBytesToMove,
12685  uint32_t maxAllocationsToMove)
12686 {
12687  if(!m_AllAllocations && m_AllocationCount == 0)
12688  {
12689  return VK_SUCCESS;
12690  }
12691 
12692  const size_t blockCount = m_Blocks.size();
12693  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12694  {
12695  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12696 
12697  if(m_AllAllocations)
12698  {
12699  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12700  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12701  it != pMetadata->m_Suballocations.end();
12702  ++it)
12703  {
12704  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12705  {
12706  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12707  pBlockInfo->m_Allocations.push_back(allocInfo);
12708  }
12709  }
12710  }
12711 
12712  pBlockInfo->CalcHasNonMovableAllocations();
12713 
12714  // This is a choice based on research.
12715  // Option 1:
12716  pBlockInfo->SortAllocationsByOffsetDescending();
12717  // Option 2:
12718  //pBlockInfo->SortAllocationsBySizeDescending();
12719  }
12720 
12721  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12722  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12723 
12724  // This is a choice based on research.
12725  const uint32_t roundCount = 2;
12726 
12727  // Execute defragmentation rounds (the main part).
12728  VkResult result = VK_SUCCESS;
12729  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12730  {
12731  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12732  }
12733 
12734  return result;
12735 }
12736 
12737 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12738  size_t dstBlockIndex, VkDeviceSize dstOffset,
12739  size_t srcBlockIndex, VkDeviceSize srcOffset)
12740 {
12741  if(dstBlockIndex < srcBlockIndex)
12742  {
12743  return true;
12744  }
12745  if(dstBlockIndex > srcBlockIndex)
12746  {
12747  return false;
12748  }
12749  if(dstOffset < srcOffset)
12750  {
12751  return true;
12752  }
12753  return false;
12754 }
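
// m_Blocks is sorted from most "destination" to most "source", so a move makes sense
// only if it goes strictly toward the front: to an earlier block, or to a lower offset
// within the same block. For example, (block 2, offset 0) -> (block 0, offset 4096)
// qualifies, while (block 1, offset 256) -> (block 1, offset 512) does not.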
12755 
12756 ////////////////////////////////////////////////////////////////////////////////
12757 // VmaDefragmentationAlgorithm_Fast
12758 
12759 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12760  VmaAllocator hAllocator,
12761  VmaBlockVector* pBlockVector,
12762  uint32_t currentFrameIndex,
12763  bool overlappingMoveSupported) :
12764  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12765  m_OverlappingMoveSupported(overlappingMoveSupported),
12766  m_AllocationCount(0),
12767  m_AllAllocations(false),
12768  m_BytesMoved(0),
12769  m_AllocationsMoved(0),
12770  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12771 {
12772  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12773 
12774 }
12775 
12776 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12777 {
12778 }
12779 
12780 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12781  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12782  VkDeviceSize maxBytesToMove,
12783  uint32_t maxAllocationsToMove)
12784 {
12785  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12786 
12787  const size_t blockCount = m_pBlockVector->GetBlockCount();
12788  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12789  {
12790  return VK_SUCCESS;
12791  }
12792 
12793  PreprocessMetadata();
12794 
12795  // Sort blocks in order from most "destination" to most "source".
12796 
12797  m_BlockInfos.resize(blockCount);
12798  for(size_t i = 0; i < blockCount; ++i)
12799  {
12800  m_BlockInfos[i].origBlockIndex = i;
12801  }
12802 
12803  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12804  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12805  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12806  });
12807 
12808  // THE MAIN ALGORITHM
12809 
12810  FreeSpaceDatabase freeSpaceDb;
12811 
12812  size_t dstBlockInfoIndex = 0;
12813  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12814  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12815  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12816  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12817  VkDeviceSize dstOffset = 0;
12818 
12819  bool end = false;
12820  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12821  {
12822  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12823  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12824  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12825  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12826  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12827  {
12828  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12829  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12830  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12831  if(m_AllocationsMoved == maxAllocationsToMove ||
12832  m_BytesMoved + srcAllocSize > maxBytesToMove)
12833  {
12834  end = true;
12835  break;
12836  }
12837  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12838 
12839  // Try to place it in one of free spaces from the database.
12840  size_t freeSpaceInfoIndex;
12841  VkDeviceSize dstAllocOffset;
12842  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12843  freeSpaceInfoIndex, dstAllocOffset))
12844  {
12845  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12846  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12847  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12848  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12849 
12850  // Same block
12851  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12852  {
12853  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12854 
12855  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12856 
12857  VmaSuballocation suballoc = *srcSuballocIt;
12858  suballoc.offset = dstAllocOffset;
12859  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12860  m_BytesMoved += srcAllocSize;
12861  ++m_AllocationsMoved;
12862 
12863  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12864  ++nextSuballocIt;
12865  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12866  srcSuballocIt = nextSuballocIt;
12867 
12868  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12869 
12870  VmaDefragmentationMove move = {
12871  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12872  srcAllocOffset, dstAllocOffset,
12873  srcAllocSize };
12874  moves.push_back(move);
12875  }
12876  // Different block
12877  else
12878  {
12879  // MOVE OPTION 2: Move the allocation to a different block.
12880 
12881  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12882 
12883  VmaSuballocation suballoc = *srcSuballocIt;
12884  suballoc.offset = dstAllocOffset;
12885  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12886  m_BytesMoved += srcAllocSize;
12887  ++m_AllocationsMoved;
12888 
12889  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12890  ++nextSuballocIt;
12891  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12892  srcSuballocIt = nextSuballocIt;
12893 
12894  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12895 
12896  VmaDefragmentationMove move = {
12897  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12898  srcAllocOffset, dstAllocOffset,
12899  srcAllocSize };
12900  moves.push_back(move);
12901  }
12902  }
12903  else
12904  {
12905  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12906 
12907  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
12908  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12909  dstAllocOffset + srcAllocSize > dstBlockSize)
12910  {
12911  // But before that, register remaining free space at the end of dst block.
12912  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12913 
12914  ++dstBlockInfoIndex;
12915  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12916  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12917  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12918  dstBlockSize = pDstMetadata->GetSize();
12919  dstOffset = 0;
12920  dstAllocOffset = 0;
12921  }
12922 
12923  // Same block
12924  if(dstBlockInfoIndex == srcBlockInfoIndex)
12925  {
12926  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12927 
12928  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12929 
12930  bool skipOver = overlap;
12931  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12932  {
12933  // If destination and source places overlap, skip the move if it would
12934  // shift the allocation by less than 1/64 of its size.
12935  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12936  }
12937 
12938  if(skipOver)
12939  {
12940  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12941 
12942  dstOffset = srcAllocOffset + srcAllocSize;
12943  ++srcSuballocIt;
12944  }
12945  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12946  else
12947  {
12948  srcSuballocIt->offset = dstAllocOffset;
12949  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12950  dstOffset = dstAllocOffset + srcAllocSize;
12951  m_BytesMoved += srcAllocSize;
12952  ++m_AllocationsMoved;
12953  ++srcSuballocIt;
12954  VmaDefragmentationMove move = {
12955  srcOrigBlockIndex, dstOrigBlockIndex,
12956  srcAllocOffset, dstAllocOffset,
12957  srcAllocSize };
12958  moves.push_back(move);
12959  }
12960  }
12961  // Different block
12962  else
12963  {
12964  // MOVE OPTION 2: Move the allocation to a different block.
12965 
12966  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12967  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12968 
12969  VmaSuballocation suballoc = *srcSuballocIt;
12970  suballoc.offset = dstAllocOffset;
12971  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12972  dstOffset = dstAllocOffset + srcAllocSize;
12973  m_BytesMoved += srcAllocSize;
12974  ++m_AllocationsMoved;
12975 
12976  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12977  ++nextSuballocIt;
12978  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12979  srcSuballocIt = nextSuballocIt;
12980 
12981  pDstMetadata->m_Suballocations.push_back(suballoc);
12982 
12983  VmaDefragmentationMove move = {
12984  srcOrigBlockIndex, dstOrigBlockIndex,
12985  srcAllocOffset, dstAllocOffset,
12986  srcAllocSize };
12987  moves.push_back(move);
12988  }
12989  }
12990  }
12991  }
12992 
12993  m_BlockInfos.clear();
12994 
12995  PostprocessMetadata();
12996 
12997  return VK_SUCCESS;
12998 }
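
// Summary of THE MAIN ALGORITHM above: blocks are visited in ascending order of free
// space and compacted with a single moving write cursor (dstOffset). Each source
// allocation either (a) fills a gap previously recorded in the FreeSpaceDatabase,
// (b) slides to a lower offset within its own block - skipped when it would move by
// less than 1/64 of its size - or (c) is appended to an earlier block. Every
// suballocation is visited once, which is what makes this algorithm "fast" compared
// to the generic round-based one.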
12999 
13000 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13001 {
13002  const size_t blockCount = m_pBlockVector->GetBlockCount();
13003  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13004  {
13005  VmaBlockMetadata_Generic* const pMetadata =
13006  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13007  pMetadata->m_FreeCount = 0;
13008  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13009  pMetadata->m_FreeSuballocationsBySize.clear();
13010  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13011  it != pMetadata->m_Suballocations.end(); )
13012  {
13013  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13014  {
13015  VmaSuballocationList::iterator nextIt = it;
13016  ++nextIt;
13017  pMetadata->m_Suballocations.erase(it);
13018  it = nextIt;
13019  }
13020  else
13021  {
13022  ++it;
13023  }
13024  }
13025  }
13026 }
13027 
13028 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13029 {
13030  const size_t blockCount = m_pBlockVector->GetBlockCount();
13031  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13032  {
13033  VmaBlockMetadata_Generic* const pMetadata =
13034  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13035  const VkDeviceSize blockSize = pMetadata->GetSize();
13036 
13037  // No allocations in this block - entire area is free.
13038  if(pMetadata->m_Suballocations.empty())
13039  {
13040  pMetadata->m_FreeCount = 1;
13041  //pMetadata->m_SumFreeSize is already set to blockSize.
13042  VmaSuballocation suballoc = {
13043  0, // offset
13044  blockSize, // size
13045  VMA_NULL, // hAllocation
13046  VMA_SUBALLOCATION_TYPE_FREE };
13047  pMetadata->m_Suballocations.push_back(suballoc);
13048  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13049  }
13050  // There are some allocations in this block.
13051  else
13052  {
13053  VkDeviceSize offset = 0;
13054  VmaSuballocationList::iterator it;
13055  for(it = pMetadata->m_Suballocations.begin();
13056  it != pMetadata->m_Suballocations.end();
13057  ++it)
13058  {
13059  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13060  VMA_ASSERT(it->offset >= offset);
13061 
13062  // Need to insert preceding free space.
13063  if(it->offset > offset)
13064  {
13065  ++pMetadata->m_FreeCount;
13066  const VkDeviceSize freeSize = it->offset - offset;
13067  VmaSuballocation suballoc = {
13068  offset, // offset
13069  freeSize, // size
13070  VMA_NULL, // hAllocation
13071  VMA_SUBALLOCATION_TYPE_FREE };
13072  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13073  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13074  {
13075  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13076  }
13077  }
13078 
13079  pMetadata->m_SumFreeSize -= it->size;
13080  offset = it->offset + it->size;
13081  }
13082 
13083  // Need to insert trailing free space.
13084  if(offset < blockSize)
13085  {
13086  ++pMetadata->m_FreeCount;
13087  const VkDeviceSize freeSize = blockSize - offset;
13088  VmaSuballocation suballoc = {
13089  offset, // offset
13090  freeSize, // size
13091  VMA_NULL, // hAllocation
13092  VMA_SUBALLOCATION_TYPE_FREE };
13093  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13094  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13095  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13096  {
13097  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13098  }
13099  }
13100 
13101  VMA_SORT(
13102  pMetadata->m_FreeSuballocationsBySize.begin(),
13103  pMetadata->m_FreeSuballocationsBySize.end(),
13104  VmaSuballocationItemSizeLess());
13105  }
13106 
13107  VMA_HEAVY_ASSERT(pMetadata->Validate());
13108  }
13109 }
13110 
13111 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13112 {
13113  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13114  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13115  while(it != pMetadata->m_Suballocations.end() &&
13116  it->offset < suballoc.offset)
13117  {
13118  ++it;
13119  }
13122  pMetadata->m_Suballocations.insert(it, suballoc);
13123 }
13124 
13125 ////////////////////////////////////////////////////////////////////////////////
13126 // VmaBlockVectorDefragmentationContext
13127 
13128 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13129  VmaAllocator hAllocator,
13130  VmaPool hCustomPool,
13131  VmaBlockVector* pBlockVector,
13132  uint32_t currFrameIndex,
13133  uint32_t algorithmFlags) :
13134  res(VK_SUCCESS),
13135  mutexLocked(false),
13136  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13137  m_hAllocator(hAllocator),
13138  m_hCustomPool(hCustomPool),
13139  m_pBlockVector(pBlockVector),
13140  m_CurrFrameIndex(currFrameIndex),
13141  m_AlgorithmFlags(algorithmFlags),
13142  m_pAlgorithm(VMA_NULL),
13143  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13144  m_AllAllocations(false)
13145 {
13146 }
13147 
13148 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13149 {
13150  vma_delete(m_hAllocator, m_pAlgorithm);
13151 }
13152 
13153 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13154 {
13155  AllocInfo info = { hAlloc, pChanged };
13156  m_Allocations.push_back(info);
13157 }
13158 
13159 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13160 {
13161  const bool allAllocations = m_AllAllocations ||
13162  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13163 
13164  /********************************
13165  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13166  ********************************/
13167 
13168  /*
13169  Fast algorithm is supported only when certain criteria are met:
13170  - VMA_DEBUG_MARGIN is 0.
13171  - All allocations in this block vector are moveable.
13172  - There is no possibility of image/buffer granularity conflict.
13173  */
13174  if(VMA_DEBUG_MARGIN == 0 &&
13175  allAllocations &&
13176  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13177  {
13178  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13179  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13180  }
13181  else
13182  {
13183  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13184  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13185  }
13186 
13187  if(allAllocations)
13188  {
13189  m_pAlgorithm->AddAll();
13190  }
13191  else
13192  {
13193  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13194  {
13195  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13196  }
13197  }
13198 }
13199 
13200 ////////////////////////////////////////////////////////////////////////////////
13201 // VmaDefragmentationContext
13202 
13203 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13204  VmaAllocator hAllocator,
13205  uint32_t currFrameIndex,
13206  uint32_t flags,
13207  VmaDefragmentationStats* pStats) :
13208  m_hAllocator(hAllocator),
13209  m_CurrFrameIndex(currFrameIndex),
13210  m_Flags(flags),
13211  m_pStats(pStats),
13212  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13213 {
13214  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13215 }
13216 
13217 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13218 {
13219  for(size_t i = m_CustomPoolContexts.size(); i--; )
13220  {
13221  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13222  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13223  vma_delete(m_hAllocator, pBlockVectorCtx);
13224  }
13225  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13226  {
13227  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13228  if(pBlockVectorCtx)
13229  {
13230  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13231  vma_delete(m_hAllocator, pBlockVectorCtx);
13232  }
13233  }
13234 }
13235 
13236 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13237 {
13238  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13239  {
13240  VmaPool pool = pPools[poolIndex];
13241  VMA_ASSERT(pool);
13242  // Pools with algorithm other than default are not defragmented.
13243  if(pool->m_BlockVector.GetAlgorithm() == 0)
13244  {
13245  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13246 
13247  for(size_t i = m_CustomPoolContexts.size(); i--; )
13248  {
13249  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13250  {
13251  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13252  break;
13253  }
13254  }
13255 
13256  if(!pBlockVectorDefragCtx)
13257  {
13258  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13259  m_hAllocator,
13260  pool,
13261  &pool->m_BlockVector,
13262  m_CurrFrameIndex,
13263  m_Flags);
13264  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13265  }
13266 
13267  pBlockVectorDefragCtx->AddAll();
13268  }
13269  }
13270 }
13271 
13272 void VmaDefragmentationContext_T::AddAllocations(
13273  uint32_t allocationCount,
13274  VmaAllocation* pAllocations,
13275  VkBool32* pAllocationsChanged)
13276 {
13277  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
13278  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13279  {
13280  const VmaAllocation hAlloc = pAllocations[allocIndex];
13281  VMA_ASSERT(hAlloc);
13282  // DedicatedAlloc cannot be defragmented.
13283  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13284  // Lost allocation cannot be defragmented.
13285  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13286  {
13287  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13288 
13289  const VmaPool hAllocPool = hAlloc->GetPool();
13290  // This allocation belongs to custom pool.
13291  if(hAllocPool != VK_NULL_HANDLE)
13292  {
13293  // Pools with algorithm other than default are not defragmented.
13294  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13295  {
13296  for(size_t i = m_CustomPoolContexts.size(); i--; )
13297  {
13298  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13299  {
13300  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13301  break;
13302  }
13303  }
13304  if(!pBlockVectorDefragCtx)
13305  {
13306  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13307  m_hAllocator,
13308  hAllocPool,
13309  &hAllocPool->m_BlockVector,
13310  m_CurrFrameIndex,
13311  m_Flags);
13312  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13313  }
13314  }
13315  }
13316  // This allocation belongs to default pool.
13317  else
13318  {
13319  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13320  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13321  if(!pBlockVectorDefragCtx)
13322  {
13323  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13324  m_hAllocator,
13325  VMA_NULL, // hCustomPool
13326  m_hAllocator->m_pBlockVectors[memTypeIndex],
13327  m_CurrFrameIndex,
13328  m_Flags);
13329  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13330  }
13331  }
13332 
13333  if(pBlockVectorDefragCtx)
13334  {
13335  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13336  &pAllocationsChanged[allocIndex] : VMA_NULL;
13337  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13338  }
13339  }
13340  }
13341 }
13342 
13343 VkResult VmaDefragmentationContext_T::Defragment(
13344  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13345  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13346  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13347 {
13348  if(pStats)
13349  {
13350  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13351  }
13352 
13353  if(commandBuffer == VK_NULL_HANDLE)
13354  {
13355  maxGpuBytesToMove = 0;
13356  maxGpuAllocationsToMove = 0;
13357  }
13358 
13359  VkResult res = VK_SUCCESS;
13360 
13361  // Process default pools.
13362  for(uint32_t memTypeIndex = 0;
13363  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13364  ++memTypeIndex)
13365  {
13366  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13367  if(pBlockVectorCtx)
13368  {
13369  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13370  pBlockVectorCtx->GetBlockVector()->Defragment(
13371  pBlockVectorCtx,
13372  pStats,
13373  maxCpuBytesToMove, maxCpuAllocationsToMove,
13374  maxGpuBytesToMove, maxGpuAllocationsToMove,
13375  commandBuffer);
13376  if(pBlockVectorCtx->res != VK_SUCCESS)
13377  {
13378  res = pBlockVectorCtx->res;
13379  }
13380  }
13381  }
13382 
13383  // Process custom pools.
13384  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13385  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13386  ++customCtxIndex)
13387  {
13388  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13389  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13390  pBlockVectorCtx->GetBlockVector()->Defragment(
13391  pBlockVectorCtx,
13392  pStats,
13393  maxCpuBytesToMove, maxCpuAllocationsToMove,
13394  maxGpuBytesToMove, maxGpuAllocationsToMove,
13395  commandBuffer);
13396  if(pBlockVectorCtx->res != VK_SUCCESS)
13397  {
13398  res = pBlockVectorCtx->res;
13399  }
13400  }
13401 
13402  return res;
13403 }
13404 
13405 ////////////////////////////////////////////////////////////////////////////////
13406 // VmaRecorder
13407 
13408 #if VMA_RECORDING_ENABLED
13409 
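// VmaRecorder serializes every intercepted call as one CSV row of the form
// threadId,time,frameIndex,functionName,args... into the file opened in
// Init(), after a two-line header carrying the file format version ("1,5").
// It is compiled only on Windows, as it relies on QueryPerformanceCounter,
// GetCurrentThreadId, and fopen_s.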
13410 VmaRecorder::VmaRecorder() :
13411  m_UseMutex(true),
13412  m_Flags(0),
13413  m_File(VMA_NULL),
13414  m_Freq(INT64_MAX),
13415  m_StartCounter(INT64_MAX)
13416 {
13417 }
13418 
13419 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13420 {
13421  m_UseMutex = useMutex;
13422  m_Flags = settings.flags;
13423 
13424  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13425  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13426 
13427  // Open file for writing.
13428  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13429  if(err != 0)
13430  {
13431  return VK_ERROR_INITIALIZATION_FAILED;
13432  }
13433 
13434  // Write header.
13435  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13436  fprintf(m_File, "%s\n", "1,5");
13437 
13438  return VK_SUCCESS;
13439 }
13440 
13441 VmaRecorder::~VmaRecorder()
13442 {
13443  if(m_File != VMA_NULL)
13444  {
13445  fclose(m_File);
13446  }
13447 }
13448 
13449 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13450 {
13451  CallParams callParams;
13452  GetBasicParams(callParams);
13453 
13454  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13455  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13456  Flush();
13457 }
13458 
13459 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13460 {
13461  CallParams callParams;
13462  GetBasicParams(callParams);
13463 
13464  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13465  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13466  Flush();
13467 }
13468 
13469 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13470 {
13471  CallParams callParams;
13472  GetBasicParams(callParams);
13473 
13474  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13475  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13476  createInfo.memoryTypeIndex,
13477  createInfo.flags,
13478  createInfo.blockSize,
13479  (uint64_t)createInfo.minBlockCount,
13480  (uint64_t)createInfo.maxBlockCount,
13481  createInfo.frameInUseCount,
13482  pool);
13483  Flush();
13484 }
13485 
13486 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13487 {
13488  CallParams callParams;
13489  GetBasicParams(callParams);
13490 
13491  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13492  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13493  pool);
13494  Flush();
13495 }
13496 
13497 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13498  const VkMemoryRequirements& vkMemReq,
13499  const VmaAllocationCreateInfo& createInfo,
13500  VmaAllocation allocation)
13501 {
13502  CallParams callParams;
13503  GetBasicParams(callParams);
13504 
13505  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13506  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13507  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13508  vkMemReq.size,
13509  vkMemReq.alignment,
13510  vkMemReq.memoryTypeBits,
13511  createInfo.flags,
13512  createInfo.usage,
13513  createInfo.requiredFlags,
13514  createInfo.preferredFlags,
13515  createInfo.memoryTypeBits,
13516  createInfo.pool,
13517  allocation,
13518  userDataStr.GetString());
13519  Flush();
13520 }
13521 
13522 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13523  const VkMemoryRequirements& vkMemReq,
13524  const VmaAllocationCreateInfo& createInfo,
13525  uint64_t allocationCount,
13526  const VmaAllocation* pAllocations)
13527 {
13528  CallParams callParams;
13529  GetBasicParams(callParams);
13530 
13531  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13532  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13533  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13534  vkMemReq.size,
13535  vkMemReq.alignment,
13536  vkMemReq.memoryTypeBits,
13537  createInfo.flags,
13538  createInfo.usage,
13539  createInfo.requiredFlags,
13540  createInfo.preferredFlags,
13541  createInfo.memoryTypeBits,
13542  createInfo.pool);
13543  PrintPointerList(allocationCount, pAllocations);
13544  fprintf(m_File, ",%s\n", userDataStr.GetString());
13545  Flush();
13546 }
13547 
13548 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13549  const VkMemoryRequirements& vkMemReq,
13550  bool requiresDedicatedAllocation,
13551  bool prefersDedicatedAllocation,
13552  const VmaAllocationCreateInfo& createInfo,
13553  VmaAllocation allocation)
13554 {
13555  CallParams callParams;
13556  GetBasicParams(callParams);
13557 
13558  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13559  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13560  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13561  vkMemReq.size,
13562  vkMemReq.alignment,
13563  vkMemReq.memoryTypeBits,
13564  requiresDedicatedAllocation ? 1 : 0,
13565  prefersDedicatedAllocation ? 1 : 0,
13566  createInfo.flags,
13567  createInfo.usage,
13568  createInfo.requiredFlags,
13569  createInfo.preferredFlags,
13570  createInfo.memoryTypeBits,
13571  createInfo.pool,
13572  allocation,
13573  userDataStr.GetString());
13574  Flush();
13575 }
13576 
13577 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13578  const VkMemoryRequirements& vkMemReq,
13579  bool requiresDedicatedAllocation,
13580  bool prefersDedicatedAllocation,
13581  const VmaAllocationCreateInfo& createInfo,
13582  VmaAllocation allocation)
13583 {
13584  CallParams callParams;
13585  GetBasicParams(callParams);
13586 
13587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13588  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13589  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13590  vkMemReq.size,
13591  vkMemReq.alignment,
13592  vkMemReq.memoryTypeBits,
13593  requiresDedicatedAllocation ? 1 : 0,
13594  prefersDedicatedAllocation ? 1 : 0,
13595  createInfo.flags,
13596  createInfo.usage,
13597  createInfo.requiredFlags,
13598  createInfo.preferredFlags,
13599  createInfo.memoryTypeBits,
13600  createInfo.pool,
13601  allocation,
13602  userDataStr.GetString());
13603  Flush();
13604 }
13605 
13606 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13607  VmaAllocation allocation)
13608 {
13609  CallParams callParams;
13610  GetBasicParams(callParams);
13611 
13612  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13613  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13614  allocation);
13615  Flush();
13616 }
13617 
13618 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13619  uint64_t allocationCount,
13620  const VmaAllocation* pAllocations)
13621 {
13622  CallParams callParams;
13623  GetBasicParams(callParams);
13624 
13625  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13626  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13627  PrintPointerList(allocationCount, pAllocations);
13628  fprintf(m_File, "\n");
13629  Flush();
13630 }
13631 
13632 void VmaRecorder::RecordResizeAllocation(
13633  uint32_t frameIndex,
13634  VmaAllocation allocation,
13635  VkDeviceSize newSize)
13636 {
13637  CallParams callParams;
13638  GetBasicParams(callParams);
13639 
13640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13641  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13642  allocation, newSize);
13643  Flush();
13644 }
13645 
13646 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13647  VmaAllocation allocation,
13648  const void* pUserData)
13649 {
13650  CallParams callParams;
13651  GetBasicParams(callParams);
13652 
13653  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13654  UserDataString userDataStr(
13655  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13656  pUserData);
13657  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13658  allocation,
13659  userDataStr.GetString());
13660  Flush();
13661 }
13662 
13663 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13664  VmaAllocation allocation)
13665 {
13666  CallParams callParams;
13667  GetBasicParams(callParams);
13668 
13669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13670  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13671  allocation);
13672  Flush();
13673 }
13674 
13675 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13676  VmaAllocation allocation)
13677 {
13678  CallParams callParams;
13679  GetBasicParams(callParams);
13680 
13681  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13682  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13683  allocation);
13684  Flush();
13685 }
13686 
13687 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13688  VmaAllocation allocation)
13689 {
13690  CallParams callParams;
13691  GetBasicParams(callParams);
13692 
13693  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13694  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13695  allocation);
13696  Flush();
13697 }
13698 
13699 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13700  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13701 {
13702  CallParams callParams;
13703  GetBasicParams(callParams);
13704 
13705  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13706  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13707  allocation,
13708  offset,
13709  size);
13710  Flush();
13711 }
13712 
13713 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13714  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13715 {
13716  CallParams callParams;
13717  GetBasicParams(callParams);
13718 
13719  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13720  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13721  allocation,
13722  offset,
13723  size);
13724  Flush();
13725 }
13726 
13727 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13728  const VkBufferCreateInfo& bufCreateInfo,
13729  const VmaAllocationCreateInfo& allocCreateInfo,
13730  VmaAllocation allocation)
13731 {
13732  CallParams callParams;
13733  GetBasicParams(callParams);
13734 
13735  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13736  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13737  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13738  bufCreateInfo.flags,
13739  bufCreateInfo.size,
13740  bufCreateInfo.usage,
13741  bufCreateInfo.sharingMode,
13742  allocCreateInfo.flags,
13743  allocCreateInfo.usage,
13744  allocCreateInfo.requiredFlags,
13745  allocCreateInfo.preferredFlags,
13746  allocCreateInfo.memoryTypeBits,
13747  allocCreateInfo.pool,
13748  allocation,
13749  userDataStr.GetString());
13750  Flush();
13751 }
13752 
13753 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13754  const VkImageCreateInfo& imageCreateInfo,
13755  const VmaAllocationCreateInfo& allocCreateInfo,
13756  VmaAllocation allocation)
13757 {
13758  CallParams callParams;
13759  GetBasicParams(callParams);
13760 
13761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13762  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13763  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13764  imageCreateInfo.flags,
13765  imageCreateInfo.imageType,
13766  imageCreateInfo.format,
13767  imageCreateInfo.extent.width,
13768  imageCreateInfo.extent.height,
13769  imageCreateInfo.extent.depth,
13770  imageCreateInfo.mipLevels,
13771  imageCreateInfo.arrayLayers,
13772  imageCreateInfo.samples,
13773  imageCreateInfo.tiling,
13774  imageCreateInfo.usage,
13775  imageCreateInfo.sharingMode,
13776  imageCreateInfo.initialLayout,
13777  allocCreateInfo.flags,
13778  allocCreateInfo.usage,
13779  allocCreateInfo.requiredFlags,
13780  allocCreateInfo.preferredFlags,
13781  allocCreateInfo.memoryTypeBits,
13782  allocCreateInfo.pool,
13783  allocation,
13784  userDataStr.GetString());
13785  Flush();
13786 }
13787 
13788 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13789  VmaAllocation allocation)
13790 {
13791  CallParams callParams;
13792  GetBasicParams(callParams);
13793 
13794  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13795  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13796  allocation);
13797  Flush();
13798 }
13799 
13800 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13801  VmaAllocation allocation)
13802 {
13803  CallParams callParams;
13804  GetBasicParams(callParams);
13805 
13806  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13807  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13808  allocation);
13809  Flush();
13810 }
13811 
13812 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13813  VmaAllocation allocation)
13814 {
13815  CallParams callParams;
13816  GetBasicParams(callParams);
13817 
13818  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13819  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13820  allocation);
13821  Flush();
13822 }
13823 
13824 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13825  VmaAllocation allocation)
13826 {
13827  CallParams callParams;
13828  GetBasicParams(callParams);
13829 
13830  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13831  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13832  allocation);
13833  Flush();
13834 }
13835 
13836 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13837  VmaPool pool)
13838 {
13839  CallParams callParams;
13840  GetBasicParams(callParams);
13841 
13842  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13843  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13844  pool);
13845  Flush();
13846 }
13847 
13848 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13849  const VmaDefragmentationInfo2& info,
13850  VmaDefragmentationContext ctx)
13851 {
13852  CallParams callParams;
13853  GetBasicParams(callParams);
13854 
13855  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13856  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13857  info.flags);
13858  PrintPointerList(info.allocationCount, info.pAllocations);
13859  fprintf(m_File, ",");
13860  PrintPointerList(info.poolCount, info.pPools);
13861  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13862  info.maxCpuBytesToMove,
13863  info.maxCpuAllocationsToMove,
13864  info.maxGpuBytesToMove,
13865  info.maxGpuAllocationsToMove,
13866  info.commandBuffer,
13867  ctx);
13868  Flush();
13869 }
13870 
13871 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13872  VmaDefragmentationContext ctx)
13873 {
13874  CallParams callParams;
13875  GetBasicParams(callParams);
13876 
13877  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13878  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13879  ctx);
13880  Flush();
13881 }
13882 
13883 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13884 {
13885  if(pUserData != VMA_NULL)
13886  {
13887  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13888  {
13889  m_Str = (const char*)pUserData;
13890  }
13891  else
13892  {
13893  sprintf_s(m_PtrStr, "%p", pUserData);
13894  m_Str = m_PtrStr;
13895  }
13896  }
13897  else
13898  {
13899  m_Str = "";
13900  }
13901 }
13902 
13903 void VmaRecorder::WriteConfiguration(
13904  const VkPhysicalDeviceProperties& devProps,
13905  const VkPhysicalDeviceMemoryProperties& memProps,
13906  bool dedicatedAllocationExtensionEnabled)
13907 {
13908  fprintf(m_File, "Config,Begin\n");
13909 
13910  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13911  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13912  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13913  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13914  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13915  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13916 
13917  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13918  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13919  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13920 
13921  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13922  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13923  {
13924  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13925  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13926  }
13927  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13928  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13929  {
13930  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13931  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13932  }
13933 
13934  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13935 
13936  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13937  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13938  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13939  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13940  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13941  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13942  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13943  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13944  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13945 
13946  fprintf(m_File, "Config,End\n");
13947 }
13948 
13949 void VmaRecorder::GetBasicParams(CallParams& outParams)
13950 {
13951  outParams.threadId = GetCurrentThreadId();
13952 
13953  LARGE_INTEGER counter;
13954  QueryPerformanceCounter(&counter);
13955  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13956 }
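// Timestamps written to the recording are seconds elapsed since Init(),
// computed from QueryPerformanceCounter ticks divided by the counter
// frequency captured at initialization.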
13957 
13958 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
13959 {
13960  if(count)
13961  {
13962  fprintf(m_File, "%p", pItems[0]);
13963  for(uint64_t i = 1; i < count; ++i)
13964  {
13965  fprintf(m_File, " %p", pItems[i]);
13966  }
13967  }
13968 }
13969 
13970 void VmaRecorder::Flush()
13971 {
13972  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13973  {
13974  fflush(m_File);
13975  }
13976 }
13977 
13978 #endif // #if VMA_RECORDING_ENABLED
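/*
A minimal usage sketch (not part of this file) showing how the recorder is
enabled from user code, assuming VMA_RECORDING_ENABLED == 1; the file path
is only an example:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
    recordSettings.pFilePath = "vma_capture.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pRecordSettings = &recordSettings;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/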
13979 
13980 ////////////////////////////////////////////////////////////////////////////////
13981 // VmaAllocator_T
13982 
13983 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13984  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13985  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13986  m_hDevice(pCreateInfo->device),
13987  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13988  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13989  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13990  m_PreferredLargeHeapBlockSize(0),
13991  m_PhysicalDevice(pCreateInfo->physicalDevice),
13992  m_CurrentFrameIndex(0),
13993  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13994  m_NextPoolId(0)
13995 #if VMA_RECORDING_ENABLED
13996  ,m_pRecorder(VMA_NULL)
13997 #endif
13998 {
13999  if(VMA_DEBUG_DETECT_CORRUPTION)
14000  {
14001  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14002  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14003  }
14004 
14005  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14006 
14007 #if !(VMA_DEDICATED_ALLOCATION)
14008  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14009  {
14010  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14011  }
14012 #endif
14013 
14014  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14015  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14016  memset(&m_MemProps, 0, sizeof(m_MemProps));
14017 
14018  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14019  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14020 
14021  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14022  {
14023  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14024  }
14025 
14026  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14027  {
14028  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14029  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14030  }
14031 
14032  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14033 
14034  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14035  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14036 
14037  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14038  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14039  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14040  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14041 
14042  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14043  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14044 
14045  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14046  {
14047  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14048  {
14049  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14050  if(limit != VK_WHOLE_SIZE)
14051  {
14052  m_HeapSizeLimit[heapIndex] = limit;
14053  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14054  {
14055  m_MemProps.memoryHeaps[heapIndex].size = limit;
14056  }
14057  }
14058  }
14059  }
14060 
14061  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14062  {
14063  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14064 
14065  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14066  this,
14067  memTypeIndex,
14068  preferredBlockSize,
14069  0,
14070  SIZE_MAX,
14071  GetBufferImageGranularity(),
14072  pCreateInfo->frameInUseCount,
14073  false, // isCustomPool
14074  false, // explicitBlockSize
14075  false); // linearAlgorithm
14076  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14077  // because minBlockCount is 0.
14078  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14079 
14080  }
14081 }
14082 
14083 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14084 {
14085  VkResult res = VK_SUCCESS;
14086 
14087  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14088  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14089  {
14090 #if VMA_RECORDING_ENABLED
14091  m_pRecorder = vma_new(this, VmaRecorder)();
14092  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14093  if(res != VK_SUCCESS)
14094  {
14095  return res;
14096  }
14097  m_pRecorder->WriteConfiguration(
14098  m_PhysicalDeviceProperties,
14099  m_MemProps,
14100  m_UseKhrDedicatedAllocation);
14101  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14102 #else
14103  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14104  return VK_ERROR_FEATURE_NOT_PRESENT;
14105 #endif
14106  }
14107 
14108  return res;
14109 }
14110 
14111 VmaAllocator_T::~VmaAllocator_T()
14112 {
14113 #if VMA_RECORDING_ENABLED
14114  if(m_pRecorder != VMA_NULL)
14115  {
14116  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14117  vma_delete(this, m_pRecorder);
14118  }
14119 #endif
14120 
14121  VMA_ASSERT(m_Pools.empty());
14122 
14123  for(size_t i = GetMemoryTypeCount(); i--; )
14124  {
14125  vma_delete(this, m_pDedicatedAllocations[i]);
14126  vma_delete(this, m_pBlockVectors[i]);
14127  }
14128 }
14129 
14130 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14131 {
14132 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14133  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
14134  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
14135  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
14136  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
14137  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
14138  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
14139  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
14140  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
14141  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
14142  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
14143  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
14144  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
14145  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
14146  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
14147  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
14148  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
14149  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
14150 #if VMA_DEDICATED_ALLOCATION
14151  if(m_UseKhrDedicatedAllocation)
14152  {
14153  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14154  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14155  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14156  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14157  }
14158 #endif // #if VMA_DEDICATED_ALLOCATION
14159 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14160 
14161 #define VMA_COPY_IF_NOT_NULL(funcName) \
14162  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14163 
14164  if(pVulkanFunctions != VMA_NULL)
14165  {
14166  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14167  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14168  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14169  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14170  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14171  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14172  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14173  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14174  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14175  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14176  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14177  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14178  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14179  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14180  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14181  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14182  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14183 #if VMA_DEDICATED_ALLOCATION
14184  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14185  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14186 #endif
14187  }
14188 
14189 #undef VMA_COPY_IF_NOT_NULL
14190 
14191  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14192  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14193  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14194  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14195  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14196  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14197  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14198  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14199  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14200  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14201  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14202  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14203  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14204  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14205  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14206  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14207  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14208  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14209  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14210 #if VMA_DEDICATED_ALLOCATION
14211  if(m_UseKhrDedicatedAllocation)
14212  {
14213  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14214  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14215  }
14216 #endif
14217 }
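/*
A minimal sketch (not part of this file) of satisfying the asserts above when
VMA_STATIC_VULKAN_FUNCTIONS is defined to 0: fill VmaVulkanFunctions manually
and pass it via VmaAllocatorCreateInfo::pVulkanFunctions. Only a few members
are shown; every member asserted above must be set the same way:

    VmaVulkanFunctions funcs = {};
    funcs.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    funcs.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    funcs.vkAllocateMemory = vkAllocateMemory;
    funcs.vkFreeMemory = vkFreeMemory;
    // ...and so on for the remaining members of VmaVulkanFunctions...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &funcs;
*/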
14218 
14219 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14220 {
14221  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14222  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14223  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14224  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14225 }
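// Example: assuming the default VMA_SMALL_HEAP_MAX_SIZE of 1 GiB, a 256 MiB
// heap counts as small and gets 256 MiB / 8 = 32 MiB blocks, while larger
// heaps use m_PreferredLargeHeapBlockSize (VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
// unless overridden via VmaAllocatorCreateInfo::preferredLargeHeapBlockSize).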
14226 
14227 VkResult VmaAllocator_T::AllocateMemoryOfType(
14228  VkDeviceSize size,
14229  VkDeviceSize alignment,
14230  bool dedicatedAllocation,
14231  VkBuffer dedicatedBuffer,
14232  VkImage dedicatedImage,
14233  const VmaAllocationCreateInfo& createInfo,
14234  uint32_t memTypeIndex,
14235  VmaSuballocationType suballocType,
14236  size_t allocationCount,
14237  VmaAllocation* pAllocations)
14238 {
14239  VMA_ASSERT(pAllocations != VMA_NULL);
14240  VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14241 
14242  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14243 
14244  // If memory type is not HOST_VISIBLE, disable MAPPED.
14245  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14246  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14247  {
14248  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14249  }
14250 
14251  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14252  VMA_ASSERT(blockVector);
14253 
14254  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14255  bool preferDedicatedMemory =
14256  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14257  dedicatedAllocation ||
14258  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14259  size > preferredBlockSize / 2;
14260 
14261  if(preferDedicatedMemory &&
14262  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14263  finalCreateInfo.pool == VK_NULL_HANDLE)
14264  {
14265  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14266  }
14267 
14268  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14269  {
14270  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14271  {
14272  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14273  }
14274  else
14275  {
14276  return AllocateDedicatedMemory(
14277  size,
14278  suballocType,
14279  memTypeIndex,
14280  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14281  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14282  finalCreateInfo.pUserData,
14283  dedicatedBuffer,
14284  dedicatedImage,
14285  allocationCount,
14286  pAllocations);
14287  }
14288  }
14289  else
14290  {
14291  VkResult res = blockVector->Allocate(
14292  VK_NULL_HANDLE, // hCurrentPool
14293  m_CurrentFrameIndex.load(),
14294  size,
14295  alignment,
14296  finalCreateInfo,
14297  suballocType,
14298  allocationCount,
14299  pAllocations);
14300  if(res == VK_SUCCESS)
14301  {
14302  return res;
14303  }
14304 
14305  // Try dedicated memory.
14306  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14307  {
14308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14309  }
14310  else
14311  {
14312  res = AllocateDedicatedMemory(
14313  size,
14314  suballocType,
14315  memTypeIndex,
14316  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14317  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14318  finalCreateInfo.pUserData,
14319  dedicatedBuffer,
14320  dedicatedImage,
14321  allocationCount,
14322  pAllocations);
14323  if(res == VK_SUCCESS)
14324  {
14325  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14326  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14327  return VK_SUCCESS;
14328  }
14329  else
14330  {
14331  // Everything failed: Return error code.
14332  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14333  return res;
14334  }
14335  }
14336  }
14337 }
14338 
14339 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14340  VkDeviceSize size,
14341  VmaSuballocationType suballocType,
14342  uint32_t memTypeIndex,
14343  bool map,
14344  bool isUserDataString,
14345  void* pUserData,
14346  VkBuffer dedicatedBuffer,
14347  VkImage dedicatedImage,
14348  size_t allocationCount,
14349  VmaAllocation* pAllocations)
14350 {
14351  VMA_ASSERT(allocationCount > 0 && pAllocations);
14352 
14353  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14354  allocInfo.memoryTypeIndex = memTypeIndex;
14355  allocInfo.allocationSize = size;
14356 
14357 #if VMA_DEDICATED_ALLOCATION
14358  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14359  if(m_UseKhrDedicatedAllocation)
14360  {
14361  if(dedicatedBuffer != VK_NULL_HANDLE)
14362  {
14363  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14364  dedicatedAllocInfo.buffer = dedicatedBuffer;
14365  allocInfo.pNext = &dedicatedAllocInfo;
14366  }
14367  else if(dedicatedImage != VK_NULL_HANDLE)
14368  {
14369  dedicatedAllocInfo.image = dedicatedImage;
14370  allocInfo.pNext = &dedicatedAllocInfo;
14371  }
14372  }
14373 #endif // #if VMA_DEDICATED_ALLOCATION
14374 
14375  size_t allocIndex;
14376  VkResult res = VK_SUCCESS;
14377  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14378  {
14379  res = AllocateDedicatedMemoryPage(
14380  size,
14381  suballocType,
14382  memTypeIndex,
14383  allocInfo,
14384  map,
14385  isUserDataString,
14386  pUserData,
14387  pAllocations + allocIndex);
14388  if(res != VK_SUCCESS)
14389  {
14390  break;
14391  }
14392  }
14393 
14394  if(res == VK_SUCCESS)
14395  {
14396  // Register them in m_pDedicatedAllocations.
14397  {
14398  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14399  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14400  VMA_ASSERT(pDedicatedAllocations);
14401  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14402  {
14403  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14404  }
14405  }
14406 
14407  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14408  }
14409  else
14410  {
14411  // Free all already created allocations.
14412  while(allocIndex--)
14413  {
14414  VmaAllocation currAlloc = pAllocations[allocIndex];
14415  VkDeviceMemory hMemory = currAlloc->GetMemory();
14416 
14417  /*
14418  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
14419  before vkFreeMemory.
14420 
14421  if(currAlloc->GetMappedData() != VMA_NULL)
14422  {
14423  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14424  }
14425  */
14426 
14427  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14428 
14429  currAlloc->SetUserData(this, VMA_NULL);
14430  vma_delete(this, currAlloc);
14431  }
14432 
14433  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14434  }
14435 
14436  return res;
14437 }
14438 
14439 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14440  VkDeviceSize size,
14441  VmaSuballocationType suballocType,
14442  uint32_t memTypeIndex,
14443  const VkMemoryAllocateInfo& allocInfo,
14444  bool map,
14445  bool isUserDataString,
14446  void* pUserData,
14447  VmaAllocation* pAllocation)
14448 {
14449  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14450  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14451  if(res < 0)
14452  {
14453  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14454  return res;
14455  }
14456 
14457  void* pMappedData = VMA_NULL;
14458  if(map)
14459  {
14460  res = (*m_VulkanFunctions.vkMapMemory)(
14461  m_hDevice,
14462  hMemory,
14463  0,
14464  VK_WHOLE_SIZE,
14465  0,
14466  &pMappedData);
14467  if(res < 0)
14468  {
14469  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14470  FreeVulkanMemory(memTypeIndex, size, hMemory);
14471  return res;
14472  }
14473  }
14474 
14475  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14476  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14477  (*pAllocation)->SetUserData(this, pUserData);
14478  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14479  {
14480  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14481  }
14482 
14483  return VK_SUCCESS;
14484 }
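// Note: when `map` is true, the dedicated allocation stays persistently
// mapped for its whole lifetime; the matching vkUnmapMemory is deliberately
// skipped before vkFreeMemory, which the Vulkan spec permits (see the
// comment in AllocateDedicatedMemory above).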
14485 
14486 void VmaAllocator_T::GetBufferMemoryRequirements(
14487  VkBuffer hBuffer,
14488  VkMemoryRequirements& memReq,
14489  bool& requiresDedicatedAllocation,
14490  bool& prefersDedicatedAllocation) const
14491 {
14492 #if VMA_DEDICATED_ALLOCATION
14493  if(m_UseKhrDedicatedAllocation)
14494  {
14495  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14496  memReqInfo.buffer = hBuffer;
14497 
14498  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14499 
14500  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14501  memReq2.pNext = &memDedicatedReq;
14502 
14503  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14504 
14505  memReq = memReq2.memoryRequirements;
14506  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14507  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14508  }
14509  else
14510 #endif // #if VMA_DEDICATED_ALLOCATION
14511  {
14512  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14513  requiresDedicatedAllocation = false;
14514  prefersDedicatedAllocation = false;
14515  }
14516 }
14517 
14518 void VmaAllocator_T::GetImageMemoryRequirements(
14519  VkImage hImage,
14520  VkMemoryRequirements& memReq,
14521  bool& requiresDedicatedAllocation,
14522  bool& prefersDedicatedAllocation) const
14523 {
14524 #if VMA_DEDICATED_ALLOCATION
14525  if(m_UseKhrDedicatedAllocation)
14526  {
14527  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14528  memReqInfo.image = hImage;
14529 
14530  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14531 
14532  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14533  memReq2.pNext = &memDedicatedReq;
14534 
14535  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14536 
14537  memReq = memReq2.memoryRequirements;
14538  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14539  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14540  }
14541  else
14542 #endif // #if VMA_DEDICATED_ALLOCATION
14543  {
14544  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14545  requiresDedicatedAllocation = false;
14546  prefersDedicatedAllocation = false;
14547  }
14548 }
14549 
14550 VkResult VmaAllocator_T::AllocateMemory(
14551  const VkMemoryRequirements& vkMemReq,
14552  bool requiresDedicatedAllocation,
14553  bool prefersDedicatedAllocation,
14554  VkBuffer dedicatedBuffer,
14555  VkImage dedicatedImage,
14556  const VmaAllocationCreateInfo& createInfo,
14557  VmaSuballocationType suballocType,
14558  size_t allocationCount,
14559  VmaAllocation* pAllocations)
14560 {
14561  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14562 
14563  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14564 
14565  if(vkMemReq.size == 0)
14566  {
14567  return VK_ERROR_VALIDATION_FAILED_EXT;
14568  }
14569  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14570  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14571  {
14572  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14573  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14574  }
14575  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14576  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14577  {
14578  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14579  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14580  }
14581  if(requiresDedicatedAllocation)
14582  {
14583  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14584  {
14585  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14586  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14587  }
14588  if(createInfo.pool != VK_NULL_HANDLE)
14589  {
14590  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14591  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14592  }
14593  }
14594  if((createInfo.pool != VK_NULL_HANDLE) &&
14595  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14596  {
14597  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14598  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14599  }
14600 
14601  if(createInfo.pool != VK_NULL_HANDLE)
14602  {
14603  const VkDeviceSize alignmentForPool = VMA_MAX(
14604  vkMemReq.alignment,
14605  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14606  return createInfo.pool->m_BlockVector.Allocate(
14607  createInfo.pool,
14608  m_CurrentFrameIndex.load(),
14609  vkMemReq.size,
14610  alignmentForPool,
14611  createInfo,
14612  suballocType,
14613  allocationCount,
14614  pAllocations);
14615  }
14616  else
14617  {
14618  // Bit mask of Vulkan memory types acceptable for this allocation.
14619  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14620  uint32_t memTypeIndex = UINT32_MAX;
14621  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14622  if(res == VK_SUCCESS)
14623  {
14624  VkDeviceSize alignmentForMemType = VMA_MAX(
14625  vkMemReq.alignment,
14626  GetMemoryTypeMinAlignment(memTypeIndex));
14627 
14628  res = AllocateMemoryOfType(
14629  vkMemReq.size,
14630  alignmentForMemType,
14631  requiresDedicatedAllocation || prefersDedicatedAllocation,
14632  dedicatedBuffer,
14633  dedicatedImage,
14634  createInfo,
14635  memTypeIndex,
14636  suballocType,
14637  allocationCount,
14638  pAllocations);
14639  // Succeeded on first try.
14640  if(res == VK_SUCCESS)
14641  {
14642  return res;
14643  }
14644  // Allocation from this memory type failed. Try other compatible memory types.
14645  else
14646  {
14647  for(;;)
14648  {
14649  // Remove old memTypeIndex from list of possibilities.
14650  memoryTypeBits &= ~(1u << memTypeIndex);
14651  // Find alternative memTypeIndex.
14652  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14653  if(res == VK_SUCCESS)
14654  {
14655  alignmentForMemType = VMA_MAX(
14656  vkMemReq.alignment,
14657  GetMemoryTypeMinAlignment(memTypeIndex));
14658 
14659  res = AllocateMemoryOfType(
14660  vkMemReq.size,
14661  alignmentForMemType,
14662  requiresDedicatedAllocation || prefersDedicatedAllocation,
14663  dedicatedBuffer,
14664  dedicatedImage,
14665  createInfo,
14666  memTypeIndex,
14667  suballocType,
14668  allocationCount,
14669  pAllocations);
14670  // Allocation from this alternative memory type succeeded.
14671  if(res == VK_SUCCESS)
14672  {
14673  return res;
14674  }
14675  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14676  }
14677  // No other matching memory type index could be found.
14678  else
14679  {
14680  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14681  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14682  }
14683  }
14684  }
14685  }
14686  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14687  else
14688  return res;
14689  }
14690 }
14691 
14692 void VmaAllocator_T::FreeMemory(
14693  size_t allocationCount,
14694  const VmaAllocation* pAllocations)
14695 {
14696  VMA_ASSERT(pAllocations);
14697 
14698  for(size_t allocIndex = allocationCount; allocIndex--; )
14699  {
14700  VmaAllocation allocation = pAllocations[allocIndex];
14701 
14702  if(allocation != VK_NULL_HANDLE)
14703  {
14704  if(TouchAllocation(allocation))
14705  {
14706  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14707  {
14708  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14709  }
14710 
14711  switch(allocation->GetType())
14712  {
14713  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14714  {
14715  VmaBlockVector* pBlockVector = VMA_NULL;
14716  VmaPool hPool = allocation->GetPool();
14717  if(hPool != VK_NULL_HANDLE)
14718  {
14719  pBlockVector = &hPool->m_BlockVector;
14720  }
14721  else
14722  {
14723  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14724  pBlockVector = m_pBlockVectors[memTypeIndex];
14725  }
14726  pBlockVector->Free(allocation);
14727  }
14728  break;
14729  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14730  FreeDedicatedMemory(allocation);
14731  break;
14732  default:
14733  VMA_ASSERT(0);
14734  }
14735  }
14736 
14737  allocation->SetUserData(this, VMA_NULL);
14738  vma_delete(this, allocation);
14739  }
14740  }
14741 }
14742 
14743 VkResult VmaAllocator_T::ResizeAllocation(
14744  const VmaAllocation alloc,
14745  VkDeviceSize newSize)
14746 {
14747  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14748  {
14749  return VK_ERROR_VALIDATION_FAILED_EXT;
14750  }
14751  if(newSize == alloc->GetSize())
14752  {
14753  return VK_SUCCESS;
14754  }
14755 
14756  switch(alloc->GetType())
14757  {
14758  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14759  return VK_ERROR_FEATURE_NOT_PRESENT;
14760  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14761  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14762  {
14763  alloc->ChangeSize(newSize);
14764  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14765  return VK_SUCCESS;
14766  }
14767  else
14768  {
14769  return VK_ERROR_OUT_OF_POOL_MEMORY;
14770  }
14771  default:
14772  VMA_ASSERT(0);
14773  return VK_ERROR_VALIDATION_FAILED_EXT;
14774  }
14775 }
14776 
14777 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14778 {
14779  // Initialize.
14780  InitStatInfo(pStats->total);
14781  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14782  InitStatInfo(pStats->memoryType[i]);
14783  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14784  InitStatInfo(pStats->memoryHeap[i]);
14785 
14786  // Process default pools.
14787  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14788  {
14789  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14790  VMA_ASSERT(pBlockVector);
14791  pBlockVector->AddStats(pStats);
14792  }
14793 
14794  // Process custom pools.
14795  {
14796  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14797  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14798  {
14799  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14800  }
14801  }
14802 
14803  // Process dedicated allocations.
14804  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14805  {
14806  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14807  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14808  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14809  VMA_ASSERT(pDedicatedAllocVector);
14810  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14811  {
14812  VmaStatInfo allocationStatInfo;
14813  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14814  VmaAddStatInfo(pStats->total, allocationStatInfo);
14815  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14816  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14817  }
14818  }
14819 
14820  // Postprocess.
14821  VmaPostprocessCalcStatInfo(pStats->total);
14822  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14823  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14824  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14825  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14826 }
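/*
A minimal usage sketch (not part of this file): these aggregated statistics
are exposed through the public vmaCalculateStats() function, which forwards
here:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("used: %llu B, unused: %llu B\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);
*/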
14827 
14828 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14829 
14830 VkResult VmaAllocator_T::DefragmentationBegin(
14831  const VmaDefragmentationInfo2& info,
14832  VmaDefragmentationStats* pStats,
14833  VmaDefragmentationContext* pContext)
14834 {
14835  if(info.pAllocationsChanged != VMA_NULL)
14836  {
14837  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14838  }
14839 
14840  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14841  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14842 
14843  (*pContext)->AddPools(info.poolCount, info.pPools);
14844  (*pContext)->AddAllocations(
14845  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14846 
14847  VkResult res = (*pContext)->Defragment(
14848  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14849  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14850  info.commandBuffer, pStats);
14851 
14852  if(res != VK_NOT_READY)
14853  {
14854  vma_delete(this, *pContext);
14855  *pContext = VMA_NULL;
14856  }
14857 
14858  return res;
14859 }
14860 
14861 VkResult VmaAllocator_T::DefragmentationEnd(
14862  VmaDefragmentationContext context)
14863 {
14864  vma_delete(this, context);
14865  return VK_SUCCESS;
14866 }
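/*
A minimal usage sketch (not part of this file) of the deferred
defragmentation implemented by DefragmentationBegin/End above. Leaving
VmaDefragmentationInfo2::commandBuffer null restricts the pass to CPU-side
moves, as Defragment() then clamps the GPU limits to zero:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocations.size();
    defragInfo.pAllocations = allocations.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    // ...if VK_NOT_READY was returned, wait for the GPU work here...
    vmaDefragmentationEnd(allocator, defragCtx);
*/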
14867 
14868 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14869 {
14870  if(hAllocation->CanBecomeLost())
14871  {
14872  /*
14873  Warning: This is a carefully designed algorithm.
14874  Do not modify unless you really know what you're doing :)
14875  */
14876  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14877  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14878  for(;;)
14879  {
14880  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14881  {
14882  pAllocationInfo->memoryType = UINT32_MAX;
14883  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14884  pAllocationInfo->offset = 0;
14885  pAllocationInfo->size = hAllocation->GetSize();
14886  pAllocationInfo->pMappedData = VMA_NULL;
14887  pAllocationInfo->pUserData = hAllocation->GetUserData();
14888  return;
14889  }
14890  else if(localLastUseFrameIndex == localCurrFrameIndex)
14891  {
14892  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14893  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14894  pAllocationInfo->offset = hAllocation->GetOffset();
14895  pAllocationInfo->size = hAllocation->GetSize();
14896  pAllocationInfo->pMappedData = VMA_NULL;
14897  pAllocationInfo->pUserData = hAllocation->GetUserData();
14898  return;
14899  }
14900  else // Last use time earlier than current time.
14901  {
14902  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14903  {
14904  localLastUseFrameIndex = localCurrFrameIndex;
14905  }
14906  }
14907  }
14908  }
14909  else
14910  {
14911 #if VMA_STATS_STRING_ENABLED
14912  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14913  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14914  for(;;)
14915  {
14916  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14917  if(localLastUseFrameIndex == localCurrFrameIndex)
14918  {
14919  break;
14920  }
14921  else // Last use time earlier than current time.
14922  {
14923  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14924  {
14925  localLastUseFrameIndex = localCurrFrameIndex;
14926  }
14927  }
14928  }
14929 #endif
14930 
14931  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14932  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14933  pAllocationInfo->offset = hAllocation->GetOffset();
14934  pAllocationInfo->size = hAllocation->GetSize();
14935  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14936  pAllocationInfo->pUserData = hAllocation->GetUserData();
14937  }
14938 }
14939 
14940 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14941 {
14942  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14943  if(hAllocation->CanBecomeLost())
14944  {
14945  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14946  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14947  for(;;)
14948  {
14949  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14950  {
14951  return false;
14952  }
14953  else if(localLastUseFrameIndex == localCurrFrameIndex)
14954  {
14955  return true;
14956  }
14957  else // Last use time earlier than current time.
14958  {
14959  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14960  {
14961  localLastUseFrameIndex = localCurrFrameIndex;
14962  }
14963  }
14964  }
14965  }
14966  else
14967  {
14968 #if VMA_STATS_STRING_ENABLED
14969  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14970  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14971  for(;;)
14972  {
14973  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14974  if(localLastUseFrameIndex == localCurrFrameIndex)
14975  {
14976  break;
14977  }
14978  else // Last use time earlier than current time.
14979  {
14980  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14981  {
14982  localLastUseFrameIndex = localCurrFrameIndex;
14983  }
14984  }
14985  }
14986 #endif
14987 
14988  return true;
14989  }
14990 }
14991 
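/*
Illustrative usage sketch: the lost-allocation protocol seen from the
application side, via the public wrappers of the two functions above.
RecreateResource() is a hypothetical application-specific callback.

    void OnFrameBegin(VmaAllocator allocator, uint32_t frameIndex, VmaAllocation alloc)
    {
        vmaSetCurrentFrameIndex(allocator, frameIndex);
        // vmaTouchAllocation() atomically bumps the allocation's last-use
        // frame index, or returns VK_FALSE if it has already been lost.
        if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
        {
            RecreateResource(); // hypothetical recovery path
        }
    }
*/
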
14992 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14993 {
14994  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14995 
14996  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14997 
14998  if(newCreateInfo.maxBlockCount == 0)
14999  {
15000  newCreateInfo.maxBlockCount = SIZE_MAX;
15001  }
15002  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15003  {
15004  return VK_ERROR_INITIALIZATION_FAILED;
15005  }
15006 
15007  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15008 
15009  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15010 
15011  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15012  if(res != VK_SUCCESS)
15013  {
15014  vma_delete(this, *pPool);
15015  *pPool = VMA_NULL;
15016  return res;
15017  }
15018 
15019  // Add to m_Pools.
15020  {
15021  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15022  (*pPool)->SetId(m_NextPoolId++);
15023  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15024  }
15025 
15026  return VK_SUCCESS;
15027 }
15028 
15029 void VmaAllocator_T::DestroyPool(VmaPool pool)
15030 {
15031  // Remove from m_Pools.
15032  {
15033  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15034  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15035  VMA_ASSERT(success && "Pool not found in Allocator.");
15036  }
15037 
15038  vma_delete(this, pool);
15039 }
15040 
15041 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15042 {
15043  pool->m_BlockVector.GetPoolStats(pPoolStats);
15044 }
15045 
15046 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15047 {
15048  m_CurrentFrameIndex.store(frameIndex);
15049 }
15050 
15051 void VmaAllocator_T::MakePoolAllocationsLost(
15052  VmaPool hPool,
15053  size_t* pLostAllocationCount)
15054 {
15055  hPool->m_BlockVector.MakePoolAllocationsLost(
15056  m_CurrentFrameIndex.load(),
15057  pLostAllocationCount);
15058 }
15059 
15060 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15061 {
15062  return hPool->m_BlockVector.CheckCorruption();
15063 }
15064 
15065 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15066 {
15067  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15068 
15069  // Process default pools.
15070  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15071  {
15072  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15073  {
15074  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15075  VMA_ASSERT(pBlockVector);
15076  VkResult localRes = pBlockVector->CheckCorruption();
15077  switch(localRes)
15078  {
15079  case VK_ERROR_FEATURE_NOT_PRESENT:
15080  break;
15081  case VK_SUCCESS:
15082  finalRes = VK_SUCCESS;
15083  break;
15084  default:
15085  return localRes;
15086  }
15087  }
15088  }
15089 
15090  // Process custom pools.
15091  {
15092  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15093  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15094  {
15095  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15096  {
15097  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15098  switch(localRes)
15099  {
15100  case VK_ERROR_FEATURE_NOT_PRESENT:
15101  break;
15102  case VK_SUCCESS:
15103  finalRes = VK_SUCCESS;
15104  break;
15105  default:
15106  return localRes;
15107  }
15108  }
15109  }
15110  }
15111 
15112  return finalRes;
15113 }
15114 
15115 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15116 {
15117  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
15118  (*pAllocation)->InitLost();
15119 }
15120 
15121 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15122 {
15123  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15124 
15125  VkResult res;
15126  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15127  {
15128  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15129  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15130  {
15131  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15132  if(res == VK_SUCCESS)
15133  {
15134  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15135  }
15136  }
15137  else
15138  {
15139  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15140  }
15141  }
15142  else
15143  {
15144  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15145  }
15146 
15147  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15148  {
15149  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15150  }
15151 
15152  return res;
15153 }
15154 
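/*
The m_HeapSizeLimit accounting above backs VmaAllocatorCreateInfo::pHeapSizeLimit.
A minimal setup sketch (illustrative; physicalDevice and device assumed created
elsewhere):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit on this heap
    }
    heapLimits[0] = 256ull * 1024 * 1024; // cap heap 0 at 256 MiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/
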
15155 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15156 {
15157  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15158  {
15159  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15160  }
15161 
15162  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15163 
15164  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15165  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15166  {
15167  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15168  m_HeapSizeLimit[heapIndex] += size;
15169  }
15170 }
15171 
15172 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15173 {
15174  if(hAllocation->CanBecomeLost())
15175  {
15176  return VK_ERROR_MEMORY_MAP_FAILED;
15177  }
15178 
15179  switch(hAllocation->GetType())
15180  {
15181  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15182  {
15183  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15184  char *pBytes = VMA_NULL;
15185  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15186  if(res == VK_SUCCESS)
15187  {
15188  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15189  hAllocation->BlockAllocMap();
15190  }
15191  return res;
15192  }
15193  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15194  return hAllocation->DedicatedAllocMap(this, ppData);
15195  default:
15196  VMA_ASSERT(0);
15197  return VK_ERROR_MEMORY_MAP_FAILED;
15198  }
15199 }
15200 
15201 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15202 {
15203  switch(hAllocation->GetType())
15204  {
15205  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15206  {
15207  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15208  hAllocation->BlockAllocUnmap();
15209  pBlock->Unmap(this, 1);
15210  }
15211  break;
15212  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15213  hAllocation->DedicatedAllocUnmap(this);
15214  break;
15215  default:
15216  VMA_ASSERT(0);
15217  }
15218 }
15219 
15220 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15221 {
15222  VkResult res = VK_SUCCESS;
15223  switch(hAllocation->GetType())
15224  {
15225  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15226  res = GetVulkanFunctions().vkBindBufferMemory(
15227  m_hDevice,
15228  hBuffer,
15229  hAllocation->GetMemory(),
15230  0); //memoryOffset
15231  break;
15232  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15233  {
15234  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15235  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15236  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15237  break;
15238  }
15239  default:
15240  VMA_ASSERT(0);
15241  }
15242  return res;
15243 }
15244 
15245 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15246 {
15247  VkResult res = VK_SUCCESS;
15248  switch(hAllocation->GetType())
15249  {
15250  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15251  res = GetVulkanFunctions().vkBindImageMemory(
15252  m_hDevice,
15253  hImage,
15254  hAllocation->GetMemory(),
15255  0); //memoryOffset
15256  break;
15257  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15258  {
15259  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15260  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15261  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15262  break;
15263  }
15264  default:
15265  VMA_ASSERT(0);
15266  }
15267  return res;
15268 }
15269 
15270 void VmaAllocator_T::FlushOrInvalidateAllocation(
15271  VmaAllocation hAllocation,
15272  VkDeviceSize offset, VkDeviceSize size,
15273  VMA_CACHE_OPERATION op)
15274 {
15275  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15276  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15277  {
15278  const VkDeviceSize allocationSize = hAllocation->GetSize();
15279  VMA_ASSERT(offset <= allocationSize);
15280 
15281  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15282 
15283  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15284  memRange.memory = hAllocation->GetMemory();
15285 
15286  switch(hAllocation->GetType())
15287  {
15288  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15289  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15290  if(size == VK_WHOLE_SIZE)
15291  {
15292  memRange.size = allocationSize - memRange.offset;
15293  }
15294  else
15295  {
15296  VMA_ASSERT(offset + size <= allocationSize);
15297  memRange.size = VMA_MIN(
15298  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15299  allocationSize - memRange.offset);
15300  }
15301  break;
15302 
15303  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15304  {
15305  // 1. Still within this allocation.
15306  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15307  if(size == VK_WHOLE_SIZE)
15308  {
15309  size = allocationSize - offset;
15310  }
15311  else
15312  {
15313  VMA_ASSERT(offset + size <= allocationSize);
15314  }
15315  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15316 
15317  // 2. Adjust to whole block.
15318  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15319  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15320  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15321  memRange.offset += allocationOffset;
15322  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15323 
15324  break;
15325  }
15326 
15327  default:
15328  VMA_ASSERT(0);
15329  }
15330 
15331  switch(op)
15332  {
15333  case VMA_CACHE_FLUSH:
15334  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15335  break;
15336  case VMA_CACHE_INVALIDATE:
15337  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15338  break;
15339  default:
15340  VMA_ASSERT(0);
15341  }
15342  }
15343  // else: Just ignore this call.
15344 }
15345 
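/*
Worked example of the nonCoherentAtomSize rounding above, with illustrative
numbers: for nonCoherentAtomSize = 64, a request to flush offset = 100,
size = 200 becomes

    memRange.offset = VmaAlignDown(100, 64) = 64
    memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256

so the flushed range [64, 320) covers the requested [100, 300), both bounds are
multiples of nonCoherentAtomSize as VkMappedMemoryRange requires, and the size
is afterwards clamped to the end of the allocation or block.
*/
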
15346 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15347 {
15348  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15349 
15350  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15351  {
15352  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15353  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15354  VMA_ASSERT(pDedicatedAllocations);
15355  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15356  VMA_ASSERT(success);
15357  }
15358 
15359  VkDeviceMemory hMemory = allocation->GetMemory();
15360 
15361  /*
15362  There is no need to call this, because the Vulkan spec allows skipping
15363  vkUnmapMemory before vkFreeMemory.
15364 
15365  if(allocation->GetMappedData() != VMA_NULL)
15366  {
15367  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15368  }
15369  */
15370 
15371  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15372 
15373  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15374 }
15375 
15376 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15377 {
15378  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15379  !hAllocation->CanBecomeLost() &&
15380  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15381  {
15382  void* pData = VMA_NULL;
15383  VkResult res = Map(hAllocation, &pData);
15384  if(res == VK_SUCCESS)
15385  {
15386  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15387  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15388  Unmap(hAllocation);
15389  }
15390  else
15391  {
15392  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15393  }
15394  }
15395 }
15396 
15397 #if VMA_STATS_STRING_ENABLED
15398 
15399 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15400 {
15401  bool dedicatedAllocationsStarted = false;
15402  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15403  {
15404  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15405  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15406  VMA_ASSERT(pDedicatedAllocVector);
15407  if(pDedicatedAllocVector->empty() == false)
15408  {
15409  if(dedicatedAllocationsStarted == false)
15410  {
15411  dedicatedAllocationsStarted = true;
15412  json.WriteString("DedicatedAllocations");
15413  json.BeginObject();
15414  }
15415 
15416  json.BeginString("Type ");
15417  json.ContinueString(memTypeIndex);
15418  json.EndString();
15419 
15420  json.BeginArray();
15421 
15422  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15423  {
15424  json.BeginObject(true);
15425  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15426  hAlloc->PrintParameters(json);
15427  json.EndObject();
15428  }
15429 
15430  json.EndArray();
15431  }
15432  }
15433  if(dedicatedAllocationsStarted)
15434  {
15435  json.EndObject();
15436  }
15437 
15438  {
15439  bool allocationsStarted = false;
15440  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15441  {
15442  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15443  {
15444  if(allocationsStarted == false)
15445  {
15446  allocationsStarted = true;
15447  json.WriteString("DefaultPools");
15448  json.BeginObject();
15449  }
15450 
15451  json.BeginString("Type ");
15452  json.ContinueString(memTypeIndex);
15453  json.EndString();
15454 
15455  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15456  }
15457  }
15458  if(allocationsStarted)
15459  {
15460  json.EndObject();
15461  }
15462  }
15463 
15464  // Custom pools
15465  {
15466  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15467  const size_t poolCount = m_Pools.size();
15468  if(poolCount > 0)
15469  {
15470  json.WriteString("Pools");
15471  json.BeginObject();
15472  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15473  {
15474  json.BeginString();
15475  json.ContinueString(m_Pools[poolIndex]->GetId());
15476  json.EndString();
15477 
15478  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15479  }
15480  json.EndObject();
15481  }
15482  }
15483 }
15484 
15485 #endif // #if VMA_STATS_STRING_ENABLED
15486 
15487 //////////////////////////////////////////////////////////////////////////////////
15488 // Public interface
15489 
15490 VkResult vmaCreateAllocator(
15491  const VmaAllocatorCreateInfo* pCreateInfo,
15492  VmaAllocator* pAllocator)
15493 {
15494  VMA_ASSERT(pCreateInfo && pAllocator);
15495  VMA_DEBUG_LOG("vmaCreateAllocator");
15496  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15497  return (*pAllocator)->Init(pCreateInfo);
15498 }
15499 
15500 void vmaDestroyAllocator(
15501  VmaAllocator allocator)
15502 {
15503  if(allocator != VK_NULL_HANDLE)
15504  {
15505  VMA_DEBUG_LOG("vmaDestroyAllocator");
15506  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15507  vma_delete(&allocationCallbacks, allocator);
15508  }
15509 }
15510 
15511 void vmaGetPhysicalDeviceProperties(
15512  VmaAllocator allocator,
15513  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15514 {
15515  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15516  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15517 }
15518 
15519 void vmaGetMemoryProperties(
15520  VmaAllocator allocator,
15521  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15522 {
15523  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15524  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15525 }
15526 
15527 void vmaGetMemoryTypeProperties(
15528  VmaAllocator allocator,
15529  uint32_t memoryTypeIndex,
15530  VkMemoryPropertyFlags* pFlags)
15531 {
15532  VMA_ASSERT(allocator && pFlags);
15533  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15534  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15535 }
15536 
15537 void vmaSetCurrentFrameIndex(
15538  VmaAllocator allocator,
15539  uint32_t frameIndex)
15540 {
15541  VMA_ASSERT(allocator);
15542  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15543 
15544  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15545 
15546  allocator->SetCurrentFrameIndex(frameIndex);
15547 }
15548 
15549 void vmaCalculateStats(
15550  VmaAllocator allocator,
15551  VmaStats* pStats)
15552 {
15553  VMA_ASSERT(allocator && pStats);
15554  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15555  allocator->CalculateStats(pStats);
15556 }
15557 
15558 #if VMA_STATS_STRING_ENABLED
15559 
15560 void vmaBuildStatsString(
15561  VmaAllocator allocator,
15562  char** ppStatsString,
15563  VkBool32 detailedMap)
15564 {
15565  VMA_ASSERT(allocator && ppStatsString);
15566  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15567 
15568  VmaStringBuilder sb(allocator);
15569  {
15570  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15571  json.BeginObject();
15572 
15573  VmaStats stats;
15574  allocator->CalculateStats(&stats);
15575 
15576  json.WriteString("Total");
15577  VmaPrintStatInfo(json, stats.total);
15578 
15579  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15580  {
15581  json.BeginString("Heap ");
15582  json.ContinueString(heapIndex);
15583  json.EndString();
15584  json.BeginObject();
15585 
15586  json.WriteString("Size");
15587  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15588 
15589  json.WriteString("Flags");
15590  json.BeginArray(true);
15591  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15592  {
15593  json.WriteString("DEVICE_LOCAL");
15594  }
15595  json.EndArray();
15596 
15597  if(stats.memoryHeap[heapIndex].blockCount > 0)
15598  {
15599  json.WriteString("Stats");
15600  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15601  }
15602 
15603  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15604  {
15605  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15606  {
15607  json.BeginString("Type ");
15608  json.ContinueString(typeIndex);
15609  json.EndString();
15610 
15611  json.BeginObject();
15612 
15613  json.WriteString("Flags");
15614  json.BeginArray(true);
15615  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15616  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15617  {
15618  json.WriteString("DEVICE_LOCAL");
15619  }
15620  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15621  {
15622  json.WriteString("HOST_VISIBLE");
15623  }
15624  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15625  {
15626  json.WriteString("HOST_COHERENT");
15627  }
15628  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15629  {
15630  json.WriteString("HOST_CACHED");
15631  }
15632  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15633  {
15634  json.WriteString("LAZILY_ALLOCATED");
15635  }
15636  json.EndArray();
15637 
15638  if(stats.memoryType[typeIndex].blockCount > 0)
15639  {
15640  json.WriteString("Stats");
15641  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15642  }
15643 
15644  json.EndObject();
15645  }
15646  }
15647 
15648  json.EndObject();
15649  }
15650  if(detailedMap == VK_TRUE)
15651  {
15652  allocator->PrintDetailedMap(json);
15653  }
15654 
15655  json.EndObject();
15656  }
15657 
15658  const size_t len = sb.GetLength();
15659  char* const pChars = vma_new_array(allocator, char, len + 1);
15660  if(len > 0)
15661  {
15662  memcpy(pChars, sb.GetData(), len);
15663  }
15664  pChars[len] = '\0';
15665  *ppStatsString = pChars;
15666 }
15667 
15668 void vmaFreeStatsString(
15669  VmaAllocator allocator,
15670  char* pStatsString)
15671 {
15672  if(pStatsString != VMA_NULL)
15673  {
15674  VMA_ASSERT(allocator);
15675  size_t len = strlen(pStatsString);
15676  vma_delete_array(allocator, pStatsString, len + 1);
15677  }
15678 }
15679 
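/*
Typical usage of the two functions above (illustrative sketch):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map
    printf("%s\n", statsString); // or write to a .json file; the string is a JSON document
    vmaFreeStatsString(allocator, statsString);
*/
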
15680 #endif // #if VMA_STATS_STRING_ENABLED
15681 
15682 /*
15683 This function is not protected by any mutex because it just reads immutable data.
15684 */
15685 VkResult vmaFindMemoryTypeIndex(
15686  VmaAllocator allocator,
15687  uint32_t memoryTypeBits,
15688  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15689  uint32_t* pMemoryTypeIndex)
15690 {
15691  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15692  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15693  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15694 
15695  if(pAllocationCreateInfo->memoryTypeBits != 0)
15696  {
15697  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15698  }
15699 
15700  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15701  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15702 
15703  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15704  if(mapped)
15705  {
15706  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15707  }
15708 
15709  // Convert usage to requiredFlags and preferredFlags.
15710  switch(pAllocationCreateInfo->usage)
15711  {
15712  case VMA_MEMORY_USAGE_UNKNOWN:
15713  break;
15714  case VMA_MEMORY_USAGE_GPU_ONLY:
15715  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15716  {
15717  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15718  }
15719  break;
15720  case VMA_MEMORY_USAGE_CPU_ONLY:
15721  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15722  break;
15723  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15724  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15725  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15726  {
15727  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15728  }
15729  break;
15730  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15731  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15732  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15733  break;
15734  default:
15735  break;
15736  }
15737 
15738  *pMemoryTypeIndex = UINT32_MAX;
15739  uint32_t minCost = UINT32_MAX;
15740  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15741  memTypeIndex < allocator->GetMemoryTypeCount();
15742  ++memTypeIndex, memTypeBit <<= 1)
15743  {
15744  // This memory type is acceptable according to memoryTypeBits bitmask.
15745  if((memTypeBit & memoryTypeBits) != 0)
15746  {
15747  const VkMemoryPropertyFlags currFlags =
15748  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15749  // This memory type contains requiredFlags.
15750  if((requiredFlags & ~currFlags) == 0)
15751  {
15752  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15753  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15754  // Remember memory type with lowest cost.
15755  if(currCost < minCost)
15756  {
15757  *pMemoryTypeIndex = memTypeIndex;
15758  if(currCost == 0)
15759  {
15760  return VK_SUCCESS;
15761  }
15762  minCost = currCost;
15763  }
15764  }
15765  }
15766  }
15767  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15768 }
15769 
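/*
Usage sketch (illustrative): picking a memory type for a host-visible staging
resource. Passing UINT32_MAX as memoryTypeBits applies no restriction from any
particular resource.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/
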
15770 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15771  VmaAllocator allocator,
15772  const VkBufferCreateInfo* pBufferCreateInfo,
15773  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15774  uint32_t* pMemoryTypeIndex)
15775 {
15776  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15777  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15778  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15779  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15780 
15781  const VkDevice hDev = allocator->m_hDevice;
15782  VkBuffer hBuffer = VK_NULL_HANDLE;
15783  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15784  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15785  if(res == VK_SUCCESS)
15786  {
15787  VkMemoryRequirements memReq = {};
15788  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15789  hDev, hBuffer, &memReq);
15790 
15791  res = vmaFindMemoryTypeIndex(
15792  allocator,
15793  memReq.memoryTypeBits,
15794  pAllocationCreateInfo,
15795  pMemoryTypeIndex);
15796 
15797  allocator->GetVulkanFunctions().vkDestroyBuffer(
15798  hDev, hBuffer, allocator->GetAllocationCallbacks());
15799  }
15800  return res;
15801 }
15802 
15803 VkResult vmaFindMemoryTypeIndexForImageInfo(
15804  VmaAllocator allocator,
15805  const VkImageCreateInfo* pImageCreateInfo,
15806  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15807  uint32_t* pMemoryTypeIndex)
15808 {
15809  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15810  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15811  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15812  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15813 
15814  const VkDevice hDev = allocator->m_hDevice;
15815  VkImage hImage = VK_NULL_HANDLE;
15816  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15817  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15818  if(res == VK_SUCCESS)
15819  {
15820  VkMemoryRequirements memReq = {};
15821  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15822  hDev, hImage, &memReq);
15823 
15824  res = vmaFindMemoryTypeIndex(
15825  allocator,
15826  memReq.memoryTypeBits,
15827  pAllocationCreateInfo,
15828  pMemoryTypeIndex);
15829 
15830  allocator->GetVulkanFunctions().vkDestroyImage(
15831  hDev, hImage, allocator->GetAllocationCallbacks());
15832  }
15833  return res;
15834 }
15835 
15836 VkResult vmaCreatePool(
15837  VmaAllocator allocator,
15838  const VmaPoolCreateInfo* pCreateInfo,
15839  VmaPool* pPool)
15840 {
15841  VMA_ASSERT(allocator && pCreateInfo && pPool);
15842 
15843  VMA_DEBUG_LOG("vmaCreatePool");
15844 
15845  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15846 
15847  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15848 
15849 #if VMA_RECORDING_ENABLED
15850  if(allocator->GetRecorder() != VMA_NULL)
15851  {
15852  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15853  }
15854 #endif
15855 
15856  return res;
15857 }
15858 
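/*
Usage sketch for custom pools (illustrative; memTypeIndex found e.g. with
vmaFindMemoryTypeIndex() above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block
    poolCreateInfo.maxBlockCount = 4; // 0 would mean no limit, per CreatePool() above

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/
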
15859 void vmaDestroyPool(
15860  VmaAllocator allocator,
15861  VmaPool pool)
15862 {
15863  VMA_ASSERT(allocator);
15864 
15865  if(pool == VK_NULL_HANDLE)
15866  {
15867  return;
15868  }
15869 
15870  VMA_DEBUG_LOG("vmaDestroyPool");
15871 
15872  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15873 
15874 #if VMA_RECORDING_ENABLED
15875  if(allocator->GetRecorder() != VMA_NULL)
15876  {
15877  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15878  }
15879 #endif
15880 
15881  allocator->DestroyPool(pool);
15882 }
15883 
15884 void vmaGetPoolStats(
15885  VmaAllocator allocator,
15886  VmaPool pool,
15887  VmaPoolStats* pPoolStats)
15888 {
15889  VMA_ASSERT(allocator && pool && pPoolStats);
15890 
15891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15892 
15893  allocator->GetPoolStats(pool, pPoolStats);
15894 }
15895 
15896 void vmaMakePoolAllocationsLost(
15897  VmaAllocator allocator,
15898  VmaPool pool,
15899  size_t* pLostAllocationCount)
15900 {
15901  VMA_ASSERT(allocator && pool);
15902 
15903  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15904 
15905 #if VMA_RECORDING_ENABLED
15906  if(allocator->GetRecorder() != VMA_NULL)
15907  {
15908  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15909  }
15910 #endif
15911 
15912  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15913 }
15914 
15915 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15916 {
15917  VMA_ASSERT(allocator && pool);
15918 
15919  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15920 
15921  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15922 
15923  return allocator->CheckPoolCorruption(pool);
15924 }
15925 
15926 VkResult vmaAllocateMemory(
15927  VmaAllocator allocator,
15928  const VkMemoryRequirements* pVkMemoryRequirements,
15929  const VmaAllocationCreateInfo* pCreateInfo,
15930  VmaAllocation* pAllocation,
15931  VmaAllocationInfo* pAllocationInfo)
15932 {
15933  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15934 
15935  VMA_DEBUG_LOG("vmaAllocateMemory");
15936 
15937  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15938 
15939  VkResult result = allocator->AllocateMemory(
15940  *pVkMemoryRequirements,
15941  false, // requiresDedicatedAllocation
15942  false, // prefersDedicatedAllocation
15943  VK_NULL_HANDLE, // dedicatedBuffer
15944  VK_NULL_HANDLE, // dedicatedImage
15945  *pCreateInfo,
15946  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15947  1, // allocationCount
15948  pAllocation);
15949 
15950 #if VMA_RECORDING_ENABLED
15951  if(allocator->GetRecorder() != VMA_NULL)
15952  {
15953  allocator->GetRecorder()->RecordAllocateMemory(
15954  allocator->GetCurrentFrameIndex(),
15955  *pVkMemoryRequirements,
15956  *pCreateInfo,
15957  *pAllocation);
15958  }
15959 #endif
15960 
15961  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15962  {
15963  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15964  }
15965 
15966  return result;
15967 }
15968 
15969 VkResult vmaAllocateMemoryPages(
15970  VmaAllocator allocator,
15971  const VkMemoryRequirements* pVkMemoryRequirements,
15972  const VmaAllocationCreateInfo* pCreateInfo,
15973  size_t allocationCount,
15974  VmaAllocation* pAllocations,
15975  VmaAllocationInfo* pAllocationInfo)
15976 {
15977  if(allocationCount == 0)
15978  {
15979  return VK_SUCCESS;
15980  }
15981 
15982  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
15983 
15984  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
15985 
15986  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15987 
15988  VkResult result = allocator->AllocateMemory(
15989  *pVkMemoryRequirements,
15990  false, // requiresDedicatedAllocation
15991  false, // prefersDedicatedAllocation
15992  VK_NULL_HANDLE, // dedicatedBuffer
15993  VK_NULL_HANDLE, // dedicatedImage
15994  *pCreateInfo,
15995  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15996  allocationCount,
15997  pAllocations);
15998 
15999 #if VMA_RECORDING_ENABLED
16000  if(allocator->GetRecorder() != VMA_NULL)
16001  {
16002  allocator->GetRecorder()->RecordAllocateMemoryPages(
16003  allocator->GetCurrentFrameIndex(),
16004  *pVkMemoryRequirements,
16005  *pCreateInfo,
16006  (uint64_t)allocationCount,
16007  pAllocations);
16008  }
16009 #endif
16010 
16011  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16012  {
16013  for(size_t i = 0; i < allocationCount; ++i)
16014  {
16015  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16016  }
16017  }
16018 
16019  return result;
16020 }
16021 
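/*
Usage sketch (illustrative): allocating several independent "pages" in one
call. All of them share the same requirements and creation parameters; if any
allocation fails, the ones already made by this call are freed again.

    VkMemoryRequirements memReq = {};
    memReq.size = 65536;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX;

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocs[8];
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &createInfo, 8, allocs, VMA_NULL);
    // ...
    vmaFreeMemoryPages(allocator, 8, allocs);
*/
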
16022 VkResult vmaAllocateMemoryForBuffer(
16023  VmaAllocator allocator,
16024  VkBuffer buffer,
16025  const VmaAllocationCreateInfo* pCreateInfo,
16026  VmaAllocation* pAllocation,
16027  VmaAllocationInfo* pAllocationInfo)
16028 {
16029  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16030 
16031  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16032 
16033  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16034 
16035  VkMemoryRequirements vkMemReq = {};
16036  bool requiresDedicatedAllocation = false;
16037  bool prefersDedicatedAllocation = false;
16038  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16039  requiresDedicatedAllocation,
16040  prefersDedicatedAllocation);
16041 
16042  VkResult result = allocator->AllocateMemory(
16043  vkMemReq,
16044  requiresDedicatedAllocation,
16045  prefersDedicatedAllocation,
16046  buffer, // dedicatedBuffer
16047  VK_NULL_HANDLE, // dedicatedImage
16048  *pCreateInfo,
16049  VMA_SUBALLOCATION_TYPE_BUFFER,
16050  1, // allocationCount
16051  pAllocation);
16052 
16053 #if VMA_RECORDING_ENABLED
16054  if(allocator->GetRecorder() != VMA_NULL)
16055  {
16056  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16057  allocator->GetCurrentFrameIndex(),
16058  vkMemReq,
16059  requiresDedicatedAllocation,
16060  prefersDedicatedAllocation,
16061  *pCreateInfo,
16062  *pAllocation);
16063  }
16064 #endif
16065 
16066  if(pAllocationInfo && result == VK_SUCCESS)
16067  {
16068  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16069  }
16070 
16071  return result;
16072 }
16073 
16074 VkResult vmaAllocateMemoryForImage(
16075  VmaAllocator allocator,
16076  VkImage image,
16077  const VmaAllocationCreateInfo* pCreateInfo,
16078  VmaAllocation* pAllocation,
16079  VmaAllocationInfo* pAllocationInfo)
16080 {
16081  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16082 
16083  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16084 
16085  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16086 
16087  VkMemoryRequirements vkMemReq = {};
16088  bool requiresDedicatedAllocation = false;
16089  bool prefersDedicatedAllocation = false;
16090  allocator->GetImageMemoryRequirements(image, vkMemReq,
16091  requiresDedicatedAllocation, prefersDedicatedAllocation);
16092 
16093  VkResult result = allocator->AllocateMemory(
16094  vkMemReq,
16095  requiresDedicatedAllocation,
16096  prefersDedicatedAllocation,
16097  VK_NULL_HANDLE, // dedicatedBuffer
16098  image, // dedicatedImage
16099  *pCreateInfo,
16100  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16101  1, // allocationCount
16102  pAllocation);
16103 
16104 #if VMA_RECORDING_ENABLED
16105  if(allocator->GetRecorder() != VMA_NULL)
16106  {
16107  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16108  allocator->GetCurrentFrameIndex(),
16109  vkMemReq,
16110  requiresDedicatedAllocation,
16111  prefersDedicatedAllocation,
16112  *pCreateInfo,
16113  *pAllocation);
16114  }
16115 #endif
16116 
16117  if(pAllocationInfo && result == VK_SUCCESS)
16118  {
16119  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16120  }
16121 
16122  return result;
16123 }
16124 
16125 void vmaFreeMemory(
16126  VmaAllocator allocator,
16127  VmaAllocation allocation)
16128 {
16129  VMA_ASSERT(allocator);
16130 
16131  if(allocation == VK_NULL_HANDLE)
16132  {
16133  return;
16134  }
16135 
16136  VMA_DEBUG_LOG("vmaFreeMemory");
16137 
16138  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16139 
16140 #if VMA_RECORDING_ENABLED
16141  if(allocator->GetRecorder() != VMA_NULL)
16142  {
16143  allocator->GetRecorder()->RecordFreeMemory(
16144  allocator->GetCurrentFrameIndex(),
16145  allocation);
16146  }
16147 #endif
16148 
16149  allocator->FreeMemory(
16150  1, // allocationCount
16151  &allocation);
16152 }
16153 
16154 void vmaFreeMemoryPages(
16155  VmaAllocator allocator,
16156  size_t allocationCount,
16157  VmaAllocation* pAllocations)
16158 {
16159  if(allocationCount == 0)
16160  {
16161  return;
16162  }
16163 
16164  VMA_ASSERT(allocator);
16165 
16166  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16167 
16168  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16169 
16170 #if VMA_RECORDING_ENABLED
16171  if(allocator->GetRecorder() != VMA_NULL)
16172  {
16173  allocator->GetRecorder()->RecordFreeMemoryPages(
16174  allocator->GetCurrentFrameIndex(),
16175  (uint64_t)allocationCount,
16176  pAllocations);
16177  }
16178 #endif
16179 
16180  allocator->FreeMemory(allocationCount, pAllocations);
16181 }
16182 
16183 VkResult vmaResizeAllocation(
16184  VmaAllocator allocator,
16185  VmaAllocation allocation,
16186  VkDeviceSize newSize)
16187 {
16188  VMA_ASSERT(allocator && allocation);
16189 
16190  VMA_DEBUG_LOG("vmaResizeAllocation");
16191 
16192  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16193 
16194 #if VMA_RECORDING_ENABLED
16195  if(allocator->GetRecorder() != VMA_NULL)
16196  {
16197  allocator->GetRecorder()->RecordResizeAllocation(
16198  allocator->GetCurrentFrameIndex(),
16199  allocation,
16200  newSize);
16201  }
16202 #endif
16203 
16204  return allocator->ResizeAllocation(allocation, newSize);
16205 }
16206 
16207 void vmaGetAllocationInfo(
16208  VmaAllocator allocator,
16209  VmaAllocation allocation,
16210  VmaAllocationInfo* pAllocationInfo)
16211 {
16212  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16213 
16214  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16215 
16216 #if VMA_RECORDING_ENABLED
16217  if(allocator->GetRecorder() != VMA_NULL)
16218  {
16219  allocator->GetRecorder()->RecordGetAllocationInfo(
16220  allocator->GetCurrentFrameIndex(),
16221  allocation);
16222  }
16223 #endif
16224 
16225  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16226 }
16227 
16228 VkBool32 vmaTouchAllocation(
16229  VmaAllocator allocator,
16230  VmaAllocation allocation)
16231 {
16232  VMA_ASSERT(allocator && allocation);
16233 
16234  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16235 
16236 #if VMA_RECORDING_ENABLED
16237  if(allocator->GetRecorder() != VMA_NULL)
16238  {
16239  allocator->GetRecorder()->RecordTouchAllocation(
16240  allocator->GetCurrentFrameIndex(),
16241  allocation);
16242  }
16243 #endif
16244 
16245  return allocator->TouchAllocation(allocation);
16246 }
16247 
16248 void vmaSetAllocationUserData(
16249  VmaAllocator allocator,
16250  VmaAllocation allocation,
16251  void* pUserData)
16252 {
16253  VMA_ASSERT(allocator && allocation);
16254 
16255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16256 
16257  allocation->SetUserData(allocator, pUserData);
16258 
16259 #if VMA_RECORDING_ENABLED
16260  if(allocator->GetRecorder() != VMA_NULL)
16261  {
16262  allocator->GetRecorder()->RecordSetAllocationUserData(
16263  allocator->GetCurrentFrameIndex(),
16264  allocation,
16265  pUserData);
16266  }
16267 #endif
16268 }
16269 
16270 void vmaCreateLostAllocation(
16271  VmaAllocator allocator,
16272  VmaAllocation* pAllocation)
16273 {
16274  VMA_ASSERT(allocator && pAllocation);
16275 
16276  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16277 
16278  allocator->CreateLostAllocation(pAllocation);
16279 
16280 #if VMA_RECORDING_ENABLED
16281  if(allocator->GetRecorder() != VMA_NULL)
16282  {
16283  allocator->GetRecorder()->RecordCreateLostAllocation(
16284  allocator->GetCurrentFrameIndex(),
16285  *pAllocation);
16286  }
16287 #endif
16288 }
16289 
16290 VkResult vmaMapMemory(
16291  VmaAllocator allocator,
16292  VmaAllocation allocation,
16293  void** ppData)
16294 {
16295  VMA_ASSERT(allocator && allocation && ppData);
16296 
16297  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16298 
16299  VkResult res = allocator->Map(allocation, ppData);
16300 
16301 #if VMA_RECORDING_ENABLED
16302  if(allocator->GetRecorder() != VMA_NULL)
16303  {
16304  allocator->GetRecorder()->RecordMapMemory(
16305  allocator->GetCurrentFrameIndex(),
16306  allocation);
16307  }
16308 #endif
16309 
16310  return res;
16311 }
16312 
16313 void vmaUnmapMemory(
16314  VmaAllocator allocator,
16315  VmaAllocation allocation)
16316 {
16317  VMA_ASSERT(allocator && allocation);
16318 
16319  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16320 
16321 #if VMA_RECORDING_ENABLED
16322  if(allocator->GetRecorder() != VMA_NULL)
16323  {
16324  allocator->GetRecorder()->RecordUnmapMemory(
16325  allocator->GetCurrentFrameIndex(),
16326  allocation);
16327  }
16328 #endif
16329 
16330  allocator->Unmap(allocation);
16331 }
16332 
16333 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16334 {
16335  VMA_ASSERT(allocator && allocation);
16336 
16337  VMA_DEBUG_LOG("vmaFlushAllocation");
16338 
16339  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16340 
16341  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16342 
16343 #if VMA_RECORDING_ENABLED
16344  if(allocator->GetRecorder() != VMA_NULL)
16345  {
16346  allocator->GetRecorder()->RecordFlushAllocation(
16347  allocator->GetCurrentFrameIndex(),
16348  allocation, offset, size);
16349  }
16350 #endif
16351 }
16352 
16353 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16354 {
16355  VMA_ASSERT(allocator && allocation);
16356 
16357  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16358 
16359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16360 
16361  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16362 
16363 #if VMA_RECORDING_ENABLED
16364  if(allocator->GetRecorder() != VMA_NULL)
16365  {
16366  allocator->GetRecorder()->RecordInvalidateAllocation(
16367  allocator->GetCurrentFrameIndex(),
16368  allocation, offset, size);
16369  }
16370 #endif
16371 }
16372 
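/*
Usage sketch (illustrative) combining the mapping and cache-control entry
points above for a host-visible allocation; srcData and dataSize are assumed
to be provided by the application.

    void* mapped = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, (size_t)dataSize);
        // Needed only for memory types without HOST_COHERENT; on coherent
        // types FlushOrInvalidateAllocation() above is a no-op.
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
*/
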
16373 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16374 {
16375  VMA_ASSERT(allocator);
16376 
16377  VMA_DEBUG_LOG("vmaCheckCorruption");
16378 
16379  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16380 
16381  return allocator->CheckCorruption(memoryTypeBits);
16382 }
16383 
16384 VkResult vmaDefragment(
16385  VmaAllocator allocator,
16386  VmaAllocation* pAllocations,
16387  size_t allocationCount,
16388  VkBool32* pAllocationsChanged,
16389  const VmaDefragmentationInfo *pDefragmentationInfo,
16390  VmaDefragmentationStats* pDefragmentationStats)
16391 {
16392  // Deprecated interface, reimplemented using the new one.
16393 
16394  VmaDefragmentationInfo2 info2 = {};
16395  info2.allocationCount = (uint32_t)allocationCount;
16396  info2.pAllocations = pAllocations;
16397  info2.pAllocationsChanged = pAllocationsChanged;
16398  if(pDefragmentationInfo != VMA_NULL)
16399  {
16400  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16401  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16402  }
16403  else
16404  {
16405  info2.maxCpuAllocationsToMove = UINT32_MAX;
16406  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16407  }
16408  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16409 
16411  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16412  if(res == VK_NOT_READY)
16413  {
16414  res = vmaDefragmentationEnd(allocator, ctx);
16415  }
16416  return res;
16417 }
16418 
16419 VkResult vmaDefragmentationBegin(
16420  VmaAllocator allocator,
16421  const VmaDefragmentationInfo2* pInfo,
16422  VmaDefragmentationStats* pStats,
16423  VmaDefragmentationContext *pContext)
16424 {
16425  VMA_ASSERT(allocator && pInfo && pContext);
16426 
16427  // Degenerate case: Nothing to defragment.
16428  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16429  {
16430  return VK_SUCCESS;
16431  }
16432 
16433  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16434  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16435  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16436  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16437 
16438  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16439 
16440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16441 
16442  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16443 
16444 #if VMA_RECORDING_ENABLED
16445  if(allocator->GetRecorder() != VMA_NULL)
16446  {
16447  allocator->GetRecorder()->RecordDefragmentationBegin(
16448  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16449  }
16450 #endif
16451 
16452  return res;
16453 }
16454 
16455 VkResult vmaDefragmentationEnd(
16456  VmaAllocator allocator,
16457  VmaDefragmentationContext context)
16458 {
16459  VMA_ASSERT(allocator);
16460 
16461  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16462 
16463  if(context != VK_NULL_HANDLE)
16464  {
16465  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16466 
16467 #if VMA_RECORDING_ENABLED
16468  if(allocator->GetRecorder() != VMA_NULL)
16469  {
16470  allocator->GetRecorder()->RecordDefragmentationEnd(
16471  allocator->GetCurrentFrameIndex(), context);
16472  }
16473 #endif
16474 
16475  return allocator->DefragmentationEnd(context);
16476  }
16477  else
16478  {
16479  return VK_SUCCESS;
16480  }
16481 }
16482 
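/*
Usage sketch (illustrative) of the begin/end pair above for CPU-side
defragmentation of specific allocations. Calling vmaDefragmentationEnd() with
a null context is valid, so it can be invoked unconditionally.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount; // allocCount/allocs: app-provided
    defragInfo.pAllocations = allocs;
    defragInfo.pAllocationsChanged = changed; // optional VkBool32 array
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers bound to allocations that were moved must afterwards be
    // re-created and re-bound by the application.
*/
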
16483 VkResult vmaBindBufferMemory(
16484  VmaAllocator allocator,
16485  VmaAllocation allocation,
16486  VkBuffer buffer)
16487 {
16488  VMA_ASSERT(allocator && allocation && buffer);
16489 
16490  VMA_DEBUG_LOG("vmaBindBufferMemory");
16491 
16492  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16493 
16494  return allocator->BindBufferMemory(allocation, buffer);
16495 }
16496 
16497 VkResult vmaBindImageMemory(
16498  VmaAllocator allocator,
16499  VmaAllocation allocation,
16500  VkImage image)
16501 {
16502  VMA_ASSERT(allocator && allocation && image);
16503 
16504  VMA_DEBUG_LOG("vmaBindImageMemory");
16505 
16506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16507 
16508  return allocator->BindImageMemory(allocation, image);
16509 }
16510 
16511 VkResult vmaCreateBuffer(
16512  VmaAllocator allocator,
16513  const VkBufferCreateInfo* pBufferCreateInfo,
16514  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16515  VkBuffer* pBuffer,
16516  VmaAllocation* pAllocation,
16517  VmaAllocationInfo* pAllocationInfo)
16518 {
16519  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16520 
16521  if(pBufferCreateInfo->size == 0)
16522  {
16523  return VK_ERROR_VALIDATION_FAILED_EXT;
16524  }
16525 
16526  VMA_DEBUG_LOG("vmaCreateBuffer");
16527 
16528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16529 
16530  *pBuffer = VK_NULL_HANDLE;
16531  *pAllocation = VK_NULL_HANDLE;
16532 
16533  // 1. Create VkBuffer.
16534  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16535  allocator->m_hDevice,
16536  pBufferCreateInfo,
16537  allocator->GetAllocationCallbacks(),
16538  pBuffer);
16539  if(res >= 0)
16540  {
16541  // 2. vkGetBufferMemoryRequirements.
16542  VkMemoryRequirements vkMemReq = {};
16543  bool requiresDedicatedAllocation = false;
16544  bool prefersDedicatedAllocation = false;
16545  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16546  requiresDedicatedAllocation, prefersDedicatedAllocation);
16547 
16548  // Make sure alignment requirements for specific buffer usages reported
16549  // in Physical Device Properties are included in the alignment reported by memory requirements.
16550  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16551  {
16552  VMA_ASSERT(vkMemReq.alignment %
16553  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16554  }
16555  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16556  {
16557  VMA_ASSERT(vkMemReq.alignment %
16558  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16559  }
16560  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16561  {
16562  VMA_ASSERT(vkMemReq.alignment %
16563  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16564  }
16565 
16566  // 3. Allocate memory using allocator.
16567  res = allocator->AllocateMemory(
16568  vkMemReq,
16569  requiresDedicatedAllocation,
16570  prefersDedicatedAllocation,
16571  *pBuffer, // dedicatedBuffer
16572  VK_NULL_HANDLE, // dedicatedImage
16573  *pAllocationCreateInfo,
16574  VMA_SUBALLOCATION_TYPE_BUFFER,
16575  1, // allocationCount
16576  pAllocation);
16577 
16578 #if VMA_RECORDING_ENABLED
16579  if(allocator->GetRecorder() != VMA_NULL)
16580  {
16581  allocator->GetRecorder()->RecordCreateBuffer(
16582  allocator->GetCurrentFrameIndex(),
16583  *pBufferCreateInfo,
16584  *pAllocationCreateInfo,
16585  *pAllocation);
16586  }
16587 #endif
16588 
16589  if(res >= 0)
16590  {
16591  // 4. Bind buffer with memory.
16592  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16593  if(res >= 0)
16594  {
16595  // All steps succeeded.
16596  #if VMA_STATS_STRING_ENABLED
16597  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16598  #endif
16599  if(pAllocationInfo != VMA_NULL)
16600  {
16601  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16602  }
16603 
16604  return VK_SUCCESS;
16605  }
16606  allocator->FreeMemory(
16607  1, // allocationCount
16608  pAllocation);
16609  *pAllocation = VK_NULL_HANDLE;
16610  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16611  *pBuffer = VK_NULL_HANDLE;
16612  return res;
16613  }
16614  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16615  *pBuffer = VK_NULL_HANDLE;
16616  return res;
16617  }
16618  return res;
16619 }
16620 
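/*
Usage sketch (illustrative): the common one-call path implemented above,
combining buffer creation, memory allocation and binding:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buf, alloc);
*/
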
16621 void vmaDestroyBuffer(
16622  VmaAllocator allocator,
16623  VkBuffer buffer,
16624  VmaAllocation allocation)
16625 {
16626  VMA_ASSERT(allocator);
16627 
16628  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16629  {
16630  return;
16631  }
16632 
16633  VMA_DEBUG_LOG("vmaDestroyBuffer");
16634 
16635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16636 
16637 #if VMA_RECORDING_ENABLED
16638  if(allocator->GetRecorder() != VMA_NULL)
16639  {
16640  allocator->GetRecorder()->RecordDestroyBuffer(
16641  allocator->GetCurrentFrameIndex(),
16642  allocation);
16643  }
16644 #endif
16645 
16646  if(buffer != VK_NULL_HANDLE)
16647  {
16648  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16649  }
16650 
16651  if(allocation != VK_NULL_HANDLE)
16652  {
16653  allocator->FreeMemory(
16654  1, // allocationCount
16655  &allocation);
16656  }
16657 }
16658 
16659 VkResult vmaCreateImage(
16660  VmaAllocator allocator,
16661  const VkImageCreateInfo* pImageCreateInfo,
16662  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16663  VkImage* pImage,
16664  VmaAllocation* pAllocation,
16665  VmaAllocationInfo* pAllocationInfo)
16666 {
16667  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16668 
16669  if(pImageCreateInfo->extent.width == 0 ||
16670  pImageCreateInfo->extent.height == 0 ||
16671  pImageCreateInfo->extent.depth == 0 ||
16672  pImageCreateInfo->mipLevels == 0 ||
16673  pImageCreateInfo->arrayLayers == 0)
16674  {
16675  return VK_ERROR_VALIDATION_FAILED_EXT;
16676  }
16677 
16678  VMA_DEBUG_LOG("vmaCreateImage");
16679 
16680  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16681 
16682  *pImage = VK_NULL_HANDLE;
16683  *pAllocation = VK_NULL_HANDLE;
16684 
16685  // 1. Create VkImage.
16686  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16687  allocator->m_hDevice,
16688  pImageCreateInfo,
16689  allocator->GetAllocationCallbacks(),
16690  pImage);
16691  if(res >= 0)
16692  {
16693  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16694  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16695  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16696 
16697  // 2. Allocate memory using allocator.
16698  VkMemoryRequirements vkMemReq = {};
16699  bool requiresDedicatedAllocation = false;
16700  bool prefersDedicatedAllocation = false;
16701  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16702  requiresDedicatedAllocation, prefersDedicatedAllocation);
16703 
16704  res = allocator->AllocateMemory(
16705  vkMemReq,
16706  requiresDedicatedAllocation,
16707  prefersDedicatedAllocation,
16708  VK_NULL_HANDLE, // dedicatedBuffer
16709  *pImage, // dedicatedImage
16710  *pAllocationCreateInfo,
16711  suballocType,
16712  1, // allocationCount
16713  pAllocation);
16714 
16715 #if VMA_RECORDING_ENABLED
16716  if(allocator->GetRecorder() != VMA_NULL)
16717  {
16718  allocator->GetRecorder()->RecordCreateImage(
16719  allocator->GetCurrentFrameIndex(),
16720  *pImageCreateInfo,
16721  *pAllocationCreateInfo,
16722  *pAllocation);
16723  }
16724 #endif
16725 
16726  if(res >= 0)
16727  {
16728  // 3. Bind image with memory.
16729  res = allocator->BindImageMemory(*pAllocation, *pImage);
16730  if(res >= 0)
16731  {
16732  // All steps succeeded.
16733  #if VMA_STATS_STRING_ENABLED
16734  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16735  #endif
16736  if(pAllocationInfo != VMA_NULL)
16737  {
16738  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16739  }
16740 
16741  return VK_SUCCESS;
16742  }
16743  allocator->FreeMemory(
16744  1, // allocationCount
16745  pAllocation);
16746  *pAllocation = VK_NULL_HANDLE;
16747  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16748  *pImage = VK_NULL_HANDLE;
16749  return res;
16750  }
16751  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16752  *pImage = VK_NULL_HANDLE;
16753  return res;
16754  }
16755  return res;
16756 }
16757 
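/*
Usage sketch (illustrative), analogous to vmaCreateBuffer() above. Note that
the function above classifies the allocation as IMAGE_OPTIMAL or IMAGE_LINEAR
based on VkImageCreateInfo::tiling, which matters for buffer-image granularity.

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation alloc;
    VkResult res = vmaCreateImage(
        allocator, &imgCreateInfo, &allocCreateInfo, &image, &alloc, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, alloc);
*/
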
16758 void vmaDestroyImage(
16759  VmaAllocator allocator,
16760  VkImage image,
16761  VmaAllocation allocation)
16762 {
16763  VMA_ASSERT(allocator);
16764 
16765  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16766  {
16767  return;
16768  }
16769 
16770  VMA_DEBUG_LOG("vmaDestroyImage");
16771 
16772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16773 
16774 #if VMA_RECORDING_ENABLED
16775  if(allocator->GetRecorder() != VMA_NULL)
16776  {
16777  allocator->GetRecorder()->RecordDestroyImage(
16778  allocator->GetCurrentFrameIndex(),
16779  allocation);
16780  }
16781 #endif
16782 
16783  if(image != VK_NULL_HANDLE)
16784  {
16785  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16786  }
16787  if(allocation != VK_NULL_HANDLE)
16788  {
16789  allocator->FreeMemory(
16790  1, // allocationCount
16791  &allocation);
16792  }
16793 }
16794 
16795 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:2879
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1763
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1688
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1814
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2307
Definition: vk_mem_alloc.h:2301
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1769
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1881
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2486
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1758
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1783
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2185
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2327
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2363
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1744
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2310
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2853
VmaMemoryUsage
Definition: vk_mem_alloc.h:1991
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2813
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2874
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:2892
Definition: vk_mem_alloc.h:2030
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2172
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1761
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1951
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1694
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2792
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2790
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2819
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1715
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1787
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1720
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2894
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2159
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2373
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1754
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1934
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2322
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1707
Definition: vk_mem_alloc.h:2297
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2095
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1947
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1711
Definition: vk_mem_alloc.h:2122
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2313
Definition: vk_mem_alloc.h:2039
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1760
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2154
Definition: vk_mem_alloc.h:2145
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1937
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1756
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2335
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1823
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2366
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2143
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2843
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2178
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1862
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1953
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2075
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1946
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1767
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1793
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:2789
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2867
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1709
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1766
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2349
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1759
Definition: vk_mem_alloc.h:2106
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1801
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2500
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:1817
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1946
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1943
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2354
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2798
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
Definition: vk_mem_alloc.h:2115
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2481
Definition: vk_mem_alloc.h:2129
Definition: vk_mem_alloc.h:2141
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2890
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1752
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1941
Definition: vk_mem_alloc.h:1996
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2303
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1790
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1939
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1764
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1768
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2062
Definition: vk_mem_alloc.h:2136
Definition: vk_mem_alloc.h:2023
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2495
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1742
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1755
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2282
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2462
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2126
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2247
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1947
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1777
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1954
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2360
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1947
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:2858
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2467
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:2822