Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1635 /*
1636 Define this macro to 0/1 to disable/enable support for recording functionality,
1637 available through VmaAllocatorCreateInfo::pRecordSettings.
1638 */
1639 #ifndef VMA_RECORDING_ENABLED
1640  #ifdef _WIN32
1641  #define VMA_RECORDING_ENABLED 1
1642  #else
1643  #define VMA_RECORDING_ENABLED 0
1644  #endif
1645 #endif
1646 
1647 #ifndef NOMINMAX
1648  #define NOMINMAX // For windows.h
1649 #endif
1650 
1651 #ifndef VULKAN_H_
1652  #include <vulkan/vulkan.h>
1653 #endif
1654 
1655 #if VMA_RECORDING_ENABLED
1656  #include <windows.h>
1657 #endif
1658 
1659 #if !defined(VMA_DEDICATED_ALLOCATION)
1660  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1661  #define VMA_DEDICATED_ALLOCATION 1
1662  #else
1663  #define VMA_DEDICATED_ALLOCATION 0
1664  #endif
1665 #endif
1666 
1676 VK_DEFINE_HANDLE(VmaAllocator)
1677 
1678 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1680  VmaAllocator allocator,
1681  uint32_t memoryType,
1682  VkDeviceMemory memory,
1683  VkDeviceSize size);
1685 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1686  VmaAllocator allocator,
1687  uint32_t memoryType,
1688  VkDeviceMemory memory,
1689  VkDeviceSize size);
1690 
1700 typedef struct VmaDeviceMemoryCallbacks {
1702  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
1703  PFN_vmaFreeDeviceMemoryFunction pfnFree;
1704 } VmaDeviceMemoryCallbacks;
1705 
1706 typedef enum VmaAllocatorCreateFlagBits {
1712  VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
1730  VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
1733  VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1734 } VmaAllocatorCreateFlagBits;
1735 
1737 typedef VkFlags VmaAllocatorCreateFlags;
1738 
1743 typedef struct VmaVulkanFunctions {
1744  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1745  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1746  PFN_vkAllocateMemory vkAllocateMemory;
1747  PFN_vkFreeMemory vkFreeMemory;
1748  PFN_vkMapMemory vkMapMemory;
1749  PFN_vkUnmapMemory vkUnmapMemory;
1750  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1751  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1752  PFN_vkBindBufferMemory vkBindBufferMemory;
1753  PFN_vkBindImageMemory vkBindImageMemory;
1754  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1755  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1756  PFN_vkCreateBuffer vkCreateBuffer;
1757  PFN_vkDestroyBuffer vkDestroyBuffer;
1758  PFN_vkCreateImage vkCreateImage;
1759  PFN_vkDestroyImage vkDestroyImage;
1760  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1761 #if VMA_DEDICATED_ALLOCATION
1762  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1763  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1764 #endif
1765 } VmaVulkanFunctions;
1766 
1768 typedef enum VmaRecordFlagBits {
1772  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
1774  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1775 } VmaRecordFlagBits;
1778 typedef VkFlags VmaRecordFlags;
1779 
1781 typedef struct VmaRecordSettings
1782 {
1784  VmaRecordFlags flags;
1792  const char* pFilePath;
1793 } VmaRecordSettings;
1794 
1796 typedef struct VmaAllocatorCreateInfo
1797 {
1800  VmaAllocatorCreateFlags flags;
1801 
1802  VkPhysicalDevice physicalDevice;
1804 
1805  VkDevice device;
1807 
1808  VkDeviceSize preferredLargeHeapBlockSize;
1810 
1811  const VkAllocationCallbacks* pAllocationCallbacks;
1813 
1815  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
1820  uint32_t frameInUseCount;
1853  const VkDeviceSize* pHeapSizeLimit;
1860  const VmaVulkanFunctions* pVulkanFunctions;
1866  const VmaRecordSettings* pRecordSettings;
1873 } VmaAllocatorCreateInfo;
1874 
1876 VkResult vmaCreateAllocator(
1877  const VmaAllocatorCreateInfo* pCreateInfo,
1878  VmaAllocator* pAllocator);
1879 
1881 void vmaDestroyAllocator(
1882  VmaAllocator allocator);
1883 
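/*
Example (illustrative sketch, not part of the original header): creating and
destroying an allocator. `physicalDevice` and `device` are assumed to be valid
handles created elsewhere by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create and use allocations ...
    vmaDestroyAllocator(allocator);
*/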
1888 void vmaGetPhysicalDeviceProperties(
1889  VmaAllocator allocator,
1890  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1891 
1896 void vmaGetMemoryProperties(
1897  VmaAllocator allocator,
1898  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1899 
1906 void vmaGetMemoryTypeProperties(
1907  VmaAllocator allocator,
1908  uint32_t memoryTypeIndex,
1909  VkMemoryPropertyFlags* pFlags);
1910 
1919 void vmaSetCurrentFrameIndex(
1920  VmaAllocator allocator,
1921  uint32_t frameIndex);
1922 
1925 typedef struct VmaStatInfo
1926 {
1928  uint32_t blockCount;
1930  uint32_t allocationCount;
1932  uint32_t unusedRangeCount;
1934  VkDeviceSize usedBytes;
1936  VkDeviceSize unusedBytes;
1937  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1938  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1939 } VmaStatInfo;
1940 
1942 typedef struct VmaStats
1943 {
1944  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1945  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1946  VmaStatInfo total;
1947 } VmaStats;
1948 
1950 void vmaCalculateStats(
1951  VmaAllocator allocator,
1952  VmaStats* pStats);
1953 
1954 #ifndef VMA_STATS_STRING_ENABLED
1955 #define VMA_STATS_STRING_ENABLED 1
1956 #endif
1957 
1958 #if VMA_STATS_STRING_ENABLED
1959 
1961 
1963 void vmaBuildStatsString(
1964  VmaAllocator allocator,
1965  char** ppStatsString,
1966  VkBool32 detailedMap);
1967 
1968 void vmaFreeStatsString(
1969  VmaAllocator allocator,
1970  char* pStatsString);
1971 
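/*
Example (illustrative sketch): dumping statistics as a JSON string. The string
returned by vmaBuildStatsString must be released with vmaFreeStatsString.

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/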
1972 #endif // #if VMA_STATS_STRING_ENABLED
1973 
1982 VK_DEFINE_HANDLE(VmaPool)
1983 
1984 typedef enum VmaMemoryUsage
1985 {
1990  VMA_MEMORY_USAGE_UNKNOWN = 0,
1999  VMA_MEMORY_USAGE_GPU_ONLY = 1,
2009  VMA_MEMORY_USAGE_CPU_ONLY = 2,
2018  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
2026  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
2033  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
2034 } VmaMemoryUsage;
2035 
2037 typedef enum VmaAllocationCreateFlagBits {
2045  VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
2052  VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
2060  VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
2077  VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
2082  VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
2087  VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
2092  VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
2106  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
2110  VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
2114  VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
2117  VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
2120  VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2122  VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
2132  VMA_ALLOCATION_CREATE_STRATEGY_MASK =
2133  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
2134  VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
2135  VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2139  VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2140 } VmaAllocationCreateFlagBits;
2141 
2142 typedef VkFlags VmaAllocationCreateFlags;
2143 
2144 typedef struct VmaAllocationCreateInfo
2145 {
2148  VmaAllocationCreateFlags flags;
2153  VmaMemoryUsage usage;
2158  VkMemoryPropertyFlags requiredFlags;
2163  VkMemoryPropertyFlags preferredFlags;
2171  uint32_t memoryTypeBits;
2178  VmaPool pool;
2184  void* pUserData;
2185 } VmaAllocationCreateInfo;
2186 
2203 VkResult vmaFindMemoryTypeIndex(
2204  VmaAllocator allocator,
2205  uint32_t memoryTypeBits,
2206  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2207  uint32_t* pMemoryTypeIndex);
2208 
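/*
Example (illustrative sketch): choosing a memory type for a staging buffer.
`memReq` is assumed to come from vkGetBufferMemoryRequirements on the buffer.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/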
2221 VkResult vmaFindMemoryTypeIndexForBufferInfo(
2222  VmaAllocator allocator,
2223  const VkBufferCreateInfo* pBufferCreateInfo,
2224  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2225  uint32_t* pMemoryTypeIndex);
2226 
2239 VkResult vmaFindMemoryTypeIndexForImageInfo(
2240  VmaAllocator allocator,
2241  const VkImageCreateInfo* pImageCreateInfo,
2242  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2243  uint32_t* pMemoryTypeIndex);
2244 
2248 typedef enum VmaPoolCreateFlagBits {
2265  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2282  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
2293  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
2296  VMA_POOL_CREATE_ALGORITHM_MASK =
2297  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
2298  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
2299  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2300 } VmaPoolCreateFlagBits;
2301 
2302 typedef VkFlags VmaPoolCreateFlags;
2303 
2306 typedef struct VmaPoolCreateInfo {
2308  uint32_t memoryTypeIndex;
2311  VmaPoolCreateFlags flags;
2321  VkDeviceSize blockSize;
2328  size_t minBlockCount;
2331  size_t maxBlockCount;
2348  uint32_t frameInUseCount;
2349 } VmaPoolCreateInfo;
2350 
2353 typedef struct VmaPoolStats {
2356  VkDeviceSize size;
2359  VkDeviceSize unusedSize;
2362  size_t allocationCount;
2365  size_t unusedRangeCount;
2372  VkDeviceSize unusedRangeSizeMax;
2375  size_t blockCount;
2376 } VmaPoolStats;
2377 
2384 VkResult vmaCreatePool(
2385  VmaAllocator allocator,
2386  const VmaPoolCreateInfo* pCreateInfo,
2387  VmaPool* pPool);
2388 
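/*
Example (illustrative sketch): creating a custom pool. `memTypeIndex` is assumed
to come from one of the vmaFindMemoryTypeIndex* functions above.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block
    poolInfo.maxBlockCount = 4;               // at most 256 MiB total

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
*/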
2391 void vmaDestroyPool(
2392  VmaAllocator allocator,
2393  VmaPool pool);
2394 
2401 void vmaGetPoolStats(
2402  VmaAllocator allocator,
2403  VmaPool pool,
2404  VmaPoolStats* pPoolStats);
2405 
2412 void vmaMakePoolAllocationsLost(
2413  VmaAllocator allocator,
2414  VmaPool pool,
2415  size_t* pLostAllocationCount);
2416 
2431 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2432 
2457 VK_DEFINE_HANDLE(VmaAllocation)
2458 
2459 
2461 typedef struct VmaAllocationInfo {
2466  uint32_t memoryType;
2475  VkDeviceMemory deviceMemory;
2480  VkDeviceSize offset;
2485  VkDeviceSize size;
2493  void* pMappedData;
2499  void* pUserData;
2500 } VmaAllocationInfo;
2501 
2512 VkResult vmaAllocateMemory(
2513  VmaAllocator allocator,
2514  const VkMemoryRequirements* pVkMemoryRequirements,
2515  const VmaAllocationCreateInfo* pCreateInfo,
2516  VmaAllocation* pAllocation,
2517  VmaAllocationInfo* pAllocationInfo);
2518 
2538 VkResult vmaAllocateMemoryPages(
2539  VmaAllocator allocator,
2540  const VkMemoryRequirements* pVkMemoryRequirements,
2541  const VmaAllocationCreateInfo* pCreateInfo,
2542  size_t allocationCount,
2543  VmaAllocation* pAllocations,
2544  VmaAllocationInfo* pAllocationInfo);
2545 
2552 VkResult vmaAllocateMemoryForBuffer(
2553  VmaAllocator allocator,
2554  VkBuffer buffer,
2555  const VmaAllocationCreateInfo* pCreateInfo,
2556  VmaAllocation* pAllocation,
2557  VmaAllocationInfo* pAllocationInfo);
2558 
2560 VkResult vmaAllocateMemoryForImage(
2561  VmaAllocator allocator,
2562  VkImage image,
2563  const VmaAllocationCreateInfo* pCreateInfo,
2564  VmaAllocation* pAllocation,
2565  VmaAllocationInfo* pAllocationInfo);
2566 
2571 void vmaFreeMemory(
2572  VmaAllocator allocator,
2573  VmaAllocation allocation);
2574 
2585 void vmaFreeMemoryPages(
2586  VmaAllocator allocator,
2587  size_t allocationCount,
2588  VmaAllocation* pAllocations);
2589 
2610 VkResult vmaResizeAllocation(
2611  VmaAllocator allocator,
2612  VmaAllocation allocation,
2613  VkDeviceSize newSize);
2614 
2631 void vmaGetAllocationInfo(
2632  VmaAllocator allocator,
2633  VmaAllocation allocation,
2634  VmaAllocationInfo* pAllocationInfo);
2635 
2650 VkBool32 vmaTouchAllocation(
2651  VmaAllocator allocator,
2652  VmaAllocation allocation);
2653 
2667 void vmaSetAllocationUserData(
2668  VmaAllocator allocator,
2669  VmaAllocation allocation,
2670  void* pUserData);
2671 
2682 void vmaCreateLostAllocation(
2683  VmaAllocator allocator,
2684  VmaAllocation* pAllocation);
2685 
2720 VkResult vmaMapMemory(
2721  VmaAllocator allocator,
2722  VmaAllocation allocation,
2723  void** ppData);
2724 
2729 void vmaUnmapMemory(
2730  VmaAllocator allocator,
2731  VmaAllocation allocation);
2732 
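/*
Example (illustrative sketch): writing to a host-visible allocation. `allocation`
is assumed to have been created with VMA_MEMORY_USAGE_CPU_ONLY or
VMA_MEMORY_USAGE_CPU_TO_GPU; `srcData` and `dataSize` are assumed to exist.

    void* mapped = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, dataSize);
        vmaUnmapMemory(allocator, allocation);
        // Needed only if the memory type is not HOST_COHERENT:
        vmaFlushAllocation(allocator, allocation, 0, dataSize);
    }
*/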
2749 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2750 
2767 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2768 
2785 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2786 
2793 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2794 
2795 typedef enum VmaDefragmentationFlagBits {
2797  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2798 } VmaDefragmentationFlagBits;
2799 typedef VkFlags VmaDefragmentationFlags;
2800 
2805 typedef struct VmaDefragmentationInfo2 {
2808  VmaDefragmentationFlags flags;
2812  uint32_t allocationCount;
2820  VmaAllocation* pAllocations;
2827  VkBool32* pAllocationsChanged;
2829  uint32_t poolCount;
2841  VmaPool* pPools;
2850  VkDeviceSize maxCpuBytesToMove;
2855  uint32_t maxCpuAllocationsToMove;
2860  VkDeviceSize maxGpuBytesToMove;
2865  uint32_t maxGpuAllocationsToMove;
2874  VkCommandBuffer commandBuffer;
2875 } VmaDefragmentationInfo2;
2876 
2881 typedef struct VmaDefragmentationInfo {
2886  VkDeviceSize maxBytesToMove;
2890  uint32_t maxAllocationsToMove;
2892 } VmaDefragmentationInfo;
2893 
2895 typedef struct VmaDefragmentationStats {
2897  VkDeviceSize bytesMoved;
2899  VkDeviceSize bytesFreed;
2901  uint32_t allocationsMoved;
2903  uint32_t deviceMemoryBlocksFreed;
2904 } VmaDefragmentationStats;
2905 
2935 VkResult vmaDefragmentationBegin(
2936  VmaAllocator allocator,
2937  const VmaDefragmentationInfo2* pInfo,
2938  VmaDefragmentationStats* pStats,
2939  VmaDefragmentationContext *pContext);
2940 
2946 VkResult vmaDefragmentationEnd(
2947  VmaAllocator allocator,
2948  VmaDefragmentationContext context);
2949 
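/*
Example (illustrative sketch): CPU-side defragmentation of a set of allocations.
`allocations` and `allocCount` are assumed to be owned by the caller. Because no
command buffer is provided, only host-visible memory is defragmented.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must be recreated and rebound.
*/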
2990 VkResult vmaDefragment(
2991  VmaAllocator allocator,
2992  VmaAllocation* pAllocations,
2993  size_t allocationCount,
2994  VkBool32* pAllocationsChanged,
2995  const VmaDefragmentationInfo *pDefragmentationInfo,
2996  VmaDefragmentationStats* pDefragmentationStats);
2997 
3010 VkResult vmaBindBufferMemory(
3011  VmaAllocator allocator,
3012  VmaAllocation allocation,
3013  VkBuffer buffer);
3014 
3027 VkResult vmaBindImageMemory(
3028  VmaAllocator allocator,
3029  VmaAllocation allocation,
3030  VkImage image);
3031 
3058 VkResult vmaCreateBuffer(
3059  VmaAllocator allocator,
3060  const VkBufferCreateInfo* pBufferCreateInfo,
3061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3062  VkBuffer* pBuffer,
3063  VmaAllocation* pAllocation,
3064  VmaAllocationInfo* pAllocationInfo);
3065 
3077 void vmaDestroyBuffer(
3078  VmaAllocator allocator,
3079  VkBuffer buffer,
3080  VmaAllocation allocation);
3081 
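/*
Example (illustrative sketch): creating a GPU-only vertex buffer in one call and
destroying it together with its memory.

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/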
3083 VkResult vmaCreateImage(
3084  VmaAllocator allocator,
3085  const VkImageCreateInfo* pImageCreateInfo,
3086  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3087  VkImage* pImage,
3088  VmaAllocation* pAllocation,
3089  VmaAllocationInfo* pAllocationInfo);
3090 
3102 void vmaDestroyImage(
3103  VmaAllocator allocator,
3104  VkImage image,
3105  VmaAllocation allocation);
3106 
3107 #ifdef __cplusplus
3108 }
3109 #endif
3110 
3111 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3112 
3113 // For Visual Studio IntelliSense.
3114 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3115 #define VMA_IMPLEMENTATION
3116 #endif
3117 
3118 #ifdef VMA_IMPLEMENTATION
3119 #undef VMA_IMPLEMENTATION
3120 
3121 #include <cstdint>
3122 #include <cstdlib>
3123 #include <cstring>
3124 #include <cstdio> // for snprintf, used by the VMA_STATS_STRING_ENABLED helpers below
3125 /*******************************************************************************
3126 CONFIGURATION SECTION
3127 
3128 Define some of these macros before each #include of this header or change them
3129 here if you need behavior other than the default, depending on your environment.
3130 */
3131 
3132 /*
3133 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3134 internally, like:
3135 
3136  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3137 
3138 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3139 VmaAllocatorCreateInfo::pVulkanFunctions.
3140 */
3141 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3142 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3143 #endif
3144 
3145 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3146 //#define VMA_USE_STL_CONTAINERS 1
3147 
3148 /* Set this macro to 1 to make the library include and use STL containers:
3149 std::pair, std::vector, std::list, std::unordered_map.
3150 
3151 Set it to 0 or leave it undefined to make the library use its own implementation of
3152 the containers.
3153 */
3154 #if VMA_USE_STL_CONTAINERS
3155  #define VMA_USE_STL_VECTOR 1
3156  #define VMA_USE_STL_UNORDERED_MAP 1
3157  #define VMA_USE_STL_LIST 1
3158 #endif
3159 
3160 #ifndef VMA_USE_STL_SHARED_MUTEX
3161  // Compiler conforms to C++17.
3162  #if __cplusplus >= 201703L
3163  #define VMA_USE_STL_SHARED_MUTEX 1
3164  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3165  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
3166  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3167  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3168  #define VMA_USE_STL_SHARED_MUTEX 1
3169  #else
3170  #define VMA_USE_STL_SHARED_MUTEX 0
3171  #endif
3172 #endif
3173 
3174 /*
3175 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3176 Library has its own container implementation.
3177 */
3178 #if VMA_USE_STL_VECTOR
3179  #include <vector>
3180 #endif
3181 
3182 #if VMA_USE_STL_UNORDERED_MAP
3183  #include <unordered_map>
3184 #endif
3185 
3186 #if VMA_USE_STL_LIST
3187  #include <list>
3188 #endif
3189 
3190 /*
3191 Following headers are used in this CONFIGURATION section only, so feel free to
3192 remove them if not needed.
3193 */
3194 #include <cassert> // for assert
3195 #include <algorithm> // for min, max
3196 #include <mutex>
3197 
3198 #ifndef VMA_NULL
3199  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3200  #define VMA_NULL nullptr
3201 #endif
3202 
3203 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3204 #include <cstdlib>
3205 void *aligned_alloc(size_t alignment, size_t size)
3206 {
3207  // alignment must be >= sizeof(void*)
3208  if(alignment < sizeof(void*))
3209  {
3210  alignment = sizeof(void*);
3211  }
3212 
3213  return memalign(alignment, size);
3214 }
3215 #elif defined(__APPLE__) || defined(__ANDROID__)
3216 #include <cstdlib>
3217 void *aligned_alloc(size_t alignment, size_t size)
3218 {
3219  // alignment must be >= sizeof(void*)
3220  if(alignment < sizeof(void*))
3221  {
3222  alignment = sizeof(void*);
3223  }
3224 
3225  void *pointer;
3226  if(posix_memalign(&pointer, alignment, size) == 0)
3227  return pointer;
3228  return VMA_NULL;
3229 }
3230 #endif
3231 
3232 // If your compiler is not compatible with C++11 and the definition of the
3233 // aligned_alloc() function is missing, uncommenting the following line may help:
3234 
3235 //#include <malloc.h>
3236 
3237 // Normal assert to check for programmer's errors, especially in Debug configuration.
3238 #ifndef VMA_ASSERT
3239  #ifdef _DEBUG
3240  #define VMA_ASSERT(expr) assert(expr)
3241  #else
3242  #define VMA_ASSERT(expr)
3243  #endif
3244 #endif
3245 
3246 // Assert that is called very often, e.g. inside data structures such as operator[].
3247 // Making it non-empty can make the program slow.
3248 #ifndef VMA_HEAVY_ASSERT
3249  #ifdef _DEBUG
3250  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3251  #else
3252  #define VMA_HEAVY_ASSERT(expr)
3253  #endif
3254 #endif
3255 
3256 #ifndef VMA_ALIGN_OF
3257  #define VMA_ALIGN_OF(type) (__alignof(type))
3258 #endif
3259 
3260 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3261  #if defined(_WIN32)
3262  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3263  #else
3264  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3265  #endif
3266 #endif
3267 
3268 #ifndef VMA_SYSTEM_FREE
3269  #if defined(_WIN32)
3270  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3271  #else
3272  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3273  #endif
3274 #endif
3275 
3276 #ifndef VMA_MIN
3277  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3278 #endif
3279 
3280 #ifndef VMA_MAX
3281  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3282 #endif
3283 
3284 #ifndef VMA_SWAP
3285  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3286 #endif
3287 
3288 #ifndef VMA_SORT
3289  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3290 #endif
3291 
3292 #ifndef VMA_DEBUG_LOG
3293  #define VMA_DEBUG_LOG(format, ...)
3294  /*
3295  #define VMA_DEBUG_LOG(format, ...) do { \
3296  printf(format, __VA_ARGS__); \
3297  printf("\n"); \
3298  } while(false)
3299  */
3300 #endif
3301 
3302 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3303 #if VMA_STATS_STRING_ENABLED
3304  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3305  {
3306  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3307  }
3308  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3309  {
3310  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3311  }
3312  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3313  {
3314  snprintf(outStr, strLen, "%p", ptr);
3315  }
3316 #endif
3317 
3318 #ifndef VMA_MUTEX
3319  class VmaMutex
3320  {
3321  public:
3322  void Lock() { m_Mutex.lock(); }
3323  void Unlock() { m_Mutex.unlock(); }
3324  private:
3325  std::mutex m_Mutex;
3326  };
3327  #define VMA_MUTEX VmaMutex
3328 #endif
3329 
3330 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3331 #ifndef VMA_RW_MUTEX
3332  #if VMA_USE_STL_SHARED_MUTEX
3333  // Use std::shared_mutex from C++17.
3334  #include <shared_mutex>
3335  class VmaRWMutex
3336  {
3337  public:
3338  void LockRead() { m_Mutex.lock_shared(); }
3339  void UnlockRead() { m_Mutex.unlock_shared(); }
3340  void LockWrite() { m_Mutex.lock(); }
3341  void UnlockWrite() { m_Mutex.unlock(); }
3342  private:
3343  std::shared_mutex m_Mutex;
3344  };
3345  #define VMA_RW_MUTEX VmaRWMutex
3346  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3347  // Use SRWLOCK from WinAPI.
3348  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3349  class VmaRWMutex
3350  {
3351  public:
3352  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3353  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3354  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3355  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3356  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3357  private:
3358  SRWLOCK m_Lock;
3359  };
3360  #define VMA_RW_MUTEX VmaRWMutex
3361  #else
3362  // Less efficient fallback: Use normal mutex.
3363  class VmaRWMutex
3364  {
3365  public:
3366  void LockRead() { m_Mutex.Lock(); }
3367  void UnlockRead() { m_Mutex.Unlock(); }
3368  void LockWrite() { m_Mutex.Lock(); }
3369  void UnlockWrite() { m_Mutex.Unlock(); }
3370  private:
3371  VMA_MUTEX m_Mutex;
3372  };
3373  #define VMA_RW_MUTEX VmaRWMutex
3374  #endif // #if VMA_USE_STL_SHARED_MUTEX
3375 #endif // #ifndef VMA_RW_MUTEX
3376 
3377 /*
3378 If providing your own implementation, you need to implement a subset of std::atomic:
3379 
3380 - Constructor(uint32_t desired)
3381 - uint32_t load() const
3382 - void store(uint32_t desired)
3383 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3384 */
3385 #ifndef VMA_ATOMIC_UINT32
3386  #include <atomic>
3387  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3388 #endif
3389 
3390 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3391 
3395  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3396 #endif
3397 
3398 #ifndef VMA_DEBUG_ALIGNMENT
3399 
3403  #define VMA_DEBUG_ALIGNMENT (1)
3404 #endif
3405 
3406 #ifndef VMA_DEBUG_MARGIN
3407 
3411  #define VMA_DEBUG_MARGIN (0)
3412 #endif
3413 
3414 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3415 
3419  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3420 #endif
3421 
3422 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3423 
3428  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3429 #endif
3430 
3431 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3432 
3436  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3437 #endif
3438 
3439 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3440 
3444  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3445 #endif
3446 
3447 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3448  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3450 #endif
3451 
3452 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3453  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3455 #endif
3456 
3457 #ifndef VMA_CLASS_NO_COPY
3458  #define VMA_CLASS_NO_COPY(className) \
3459  private: \
3460  className(const className&) = delete; \
3461  className& operator=(const className&) = delete;
3462 #endif
3463 
3464 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3465 
3466 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3467 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3468 
3469 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3470 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3471 
3472 /*******************************************************************************
3473 END OF CONFIGURATION
3474 */
3475 
3476 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3477 
3478 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3479  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3480 
3481 // Returns number of bits set to 1 in (v).
3482 static inline uint32_t VmaCountBitsSet(uint32_t v)
3483 {
3484  uint32_t c = v - ((v >> 1) & 0x55555555);
3485  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3486  c = ((c >> 4) + c) & 0x0F0F0F0F;
3487  c = ((c >> 8) + c) & 0x00FF00FF;
3488  c = ((c >> 16) + c) & 0x0000FFFF;
3489  return c;
3490 }
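// Example: VmaCountBitsSet(0x0000000B) == 3, because 0xB is binary 1011.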
3491 
3492 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
3493 // Use types like uint32_t, uint64_t as T.
3494 template <typename T>
3495 static inline T VmaAlignUp(T val, T align)
3496 {
3497  return (val + align - 1) / align * align;
3498 }
3499 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
3500 // Use types like uint32_t, uint64_t as T.
3501 template <typename T>
3502 static inline T VmaAlignDown(T val, T align)
3503 {
3504  return val / align * align;
3505 }
3506 
3507 // Division with mathematical rounding to the nearest integer.
3508 template <typename T>
3509 static inline T VmaRoundDiv(T x, T y)
3510 {
3511  return (x + (y / (T)2)) / y;
3512 }
3513 
3514 /*
3515 Returns true if the given number is a power of two.
3516 T must be an unsigned integer, or a signed integer that is always nonnegative.
3517 Returns true for 0.
3518 */
3519 template <typename T>
3520 inline bool VmaIsPow2(T x)
3521 {
3522  return (x & (x-1)) == 0;
3523 }
3524 
3525 // Returns smallest power of 2 greater or equal to v.
3526 static inline uint32_t VmaNextPow2(uint32_t v)
3527 {
3528  v--;
3529  v |= v >> 1;
3530  v |= v >> 2;
3531  v |= v >> 4;
3532  v |= v >> 8;
3533  v |= v >> 16;
3534  v++;
3535  return v;
3536 }
3537 static inline uint64_t VmaNextPow2(uint64_t v)
3538 {
3539  v--;
3540  v |= v >> 1;
3541  v |= v >> 2;
3542  v |= v >> 4;
3543  v |= v >> 8;
3544  v |= v >> 16;
3545  v |= v >> 32;
3546  v++;
3547  return v;
3548 }
3549 
3550 // Returns largest power of 2 less or equal to v.
3551 static inline uint32_t VmaPrevPow2(uint32_t v)
3552 {
3553  v |= v >> 1;
3554  v |= v >> 2;
3555  v |= v >> 4;
3556  v |= v >> 8;
3557  v |= v >> 16;
3558  v = v ^ (v >> 1);
3559  return v;
3560 }
3561 static inline uint64_t VmaPrevPow2(uint64_t v)
3562 {
3563  v |= v >> 1;
3564  v |= v >> 2;
3565  v |= v >> 4;
3566  v |= v >> 8;
3567  v |= v >> 16;
3568  v |= v >> 32;
3569  v = v ^ (v >> 1);
3570  return v;
3571 }
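// Examples: VmaNextPow2(17) == 32 and VmaNextPow2(32) == 32;
// VmaPrevPow2(17) == 16 and VmaPrevPow2(16) == 16.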
3572 
3573 static inline bool VmaStrIsEmpty(const char* pStr)
3574 {
3575  return pStr == VMA_NULL || *pStr == '\0';
3576 }
3577 
3578 #if VMA_STATS_STRING_ENABLED
3579 
3580 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3581 {
3582  switch(algorithm)
3583  {
3584  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3585  return "Linear";
3586  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3587  return "Buddy";
3588  case 0:
3589  return "Default";
3590  default:
3591  VMA_ASSERT(0);
3592  return "";
3593  }
3594 }
3595 
3596 #endif // #if VMA_STATS_STRING_ENABLED
3597 
3598 #ifndef VMA_SORT
3599 
3600 template<typename Iterator, typename Compare>
3601 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3602 {
3603  Iterator centerValue = end; --centerValue;
3604  Iterator insertIndex = beg;
3605  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3606  {
3607  if(cmp(*memTypeIndex, *centerValue))
3608  {
3609  if(insertIndex != memTypeIndex)
3610  {
3611  VMA_SWAP(*memTypeIndex, *insertIndex);
3612  }
3613  ++insertIndex;
3614  }
3615  }
3616  if(insertIndex != centerValue)
3617  {
3618  VMA_SWAP(*insertIndex, *centerValue);
3619  }
3620  return insertIndex;
3621 }
3622 
3623 template<typename Iterator, typename Compare>
3624 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3625 {
3626  if(beg < end)
3627  {
3628  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3629  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3630  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3631  }
3632 }
3633 
3634 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3635 
3636 #endif // #ifndef VMA_SORT
3637 
3638 /*
3639 Returns true if two memory blocks occupy overlapping pages.
3640 ResourceA must be at a lower memory offset than ResourceB.
3641 
3642 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3643 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3644 */
3645 static inline bool VmaBlocksOnSamePage(
3646  VkDeviceSize resourceAOffset,
3647  VkDeviceSize resourceASize,
3648  VkDeviceSize resourceBOffset,
3649  VkDeviceSize pageSize)
3650 {
3651  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3652  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3653  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3654  VkDeviceSize resourceBStart = resourceBOffset;
3655  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3656  return resourceAEndPage == resourceBStartPage;
3657 }
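// Example: with pageSize = 4096, a resource at [0, 4000) and one starting at offset
// 4050 end/begin on the same page, so the function returns true; if the second
// resource instead starts at 4096, it begins a new page and the function returns false.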
3658 
3659 enum VmaSuballocationType
3660 {
3661  VMA_SUBALLOCATION_TYPE_FREE = 0,
3662  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3663  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3664  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3665  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3666  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3667  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3668 };
3669 
3670 /*
3671 Returns true if given suballocation types could conflict and must respect
3672 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3673 or a linear image and the other is an optimal image. If a type is unknown, behave
3674 conservatively.
3675 */
3676 static inline bool VmaIsBufferImageGranularityConflict(
3677  VmaSuballocationType suballocType1,
3678  VmaSuballocationType suballocType2)
3679 {
3680  if(suballocType1 > suballocType2)
3681  {
3682  VMA_SWAP(suballocType1, suballocType2);
3683  }
3684 
3685  switch(suballocType1)
3686  {
3687  case VMA_SUBALLOCATION_TYPE_FREE:
3688  return false;
3689  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3690  return true;
3691  case VMA_SUBALLOCATION_TYPE_BUFFER:
3692  return
3693  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3694  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3695  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3696  return
3697  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3698  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3699  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3700  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3701  return
3702  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3703  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3704  return false;
3705  default:
3706  VMA_ASSERT(0);
3707  return true;
3708  }
3709 }
3710 
3711 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3712 {
3713  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3714  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3715  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3716  {
3717  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3718  }
3719 }
3720 
3721 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3722 {
3723  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3724  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3725  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3726  {
3727  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3728  {
3729  return false;
3730  }
3731  }
3732  return true;
3733 }
3734 
3735 /*
3736 Fills structure with parameters of an example buffer to be used for transfers
3737 during GPU memory defragmentation.
3738 */
3739 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3740 {
3741  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3742  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
3743  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3744  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3745 }
3746 
3747 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3748 struct VmaMutexLock
3749 {
3750  VMA_CLASS_NO_COPY(VmaMutexLock)
3751 public:
3752  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3753  m_pMutex(useMutex ? &mutex : VMA_NULL)
3754  { if(m_pMutex) { m_pMutex->Lock(); } }
3755  ~VmaMutexLock()
3756  { if(m_pMutex) { m_pMutex->Unlock(); } }
3757 private:
3758  VMA_MUTEX* m_pMutex;
3759 };
3760 
3761 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3762 struct VmaMutexLockRead
3763 {
3764  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3765 public:
3766  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3767  m_pMutex(useMutex ? &mutex : VMA_NULL)
3768  { if(m_pMutex) { m_pMutex->LockRead(); } }
3769  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3770 private:
3771  VMA_RW_MUTEX* m_pMutex;
3772 };
3773 
3774 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3775 struct VmaMutexLockWrite
3776 {
3777  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3778 public:
3779  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3780  m_pMutex(useMutex ? &mutex : VMA_NULL)
3781  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3782  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3783 private:
3784  VMA_RW_MUTEX* m_pMutex;
3785 };
3786 
3787 #if VMA_DEBUG_GLOBAL_MUTEX
3788  static VMA_MUTEX gDebugGlobalMutex;
3789  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3790 #else
3791  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3792 #endif
3793 
3794 // Minimum size of a free suballocation to register it in the free suballocation collection.
3795 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3796 
3797 /*
3798 Performs binary search and returns an iterator to the first element that is
3799 greater than or equal to (key), according to comparison (cmp).
3800 
3801 Cmp should return true if its first argument is less than its second argument.
3802 
3803 The returned iterator points to the found element, if present in the collection,
3804 or to the place where a new element with value (key) should be inserted.
3805 */
3806 template <typename CmpLess, typename IterT, typename KeyT>
3807 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3808 {
3809  size_t down = 0, up = (end - beg);
3810  while(down < up)
3811  {
3812  const size_t mid = (down + up) / 2;
3813  if(cmp(*(beg+mid), key))
3814  {
3815  down = mid + 1;
3816  }
3817  else
3818  {
3819  up = mid;
3820  }
3821  }
3822  return beg + down;
3823 }
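// Example: in a sorted array {1, 3, 3, 7} with cmp = operator<, key 3 yields an
// iterator to index 1 (the first 3), and key 4 yields an iterator to index 3 (the 7).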
3824 
3825 /*
3826 Returns true if all pointers in the array are non-null and unique.
3827 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3828 T must be pointer type, e.g. VmaAllocation, VmaPool.
3829 */
3830 template<typename T>
3831 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3832 {
3833  for(uint32_t i = 0; i < count; ++i)
3834  {
3835  const T iPtr = arr[i];
3836  if(iPtr == VMA_NULL)
3837  {
3838  return false;
3839  }
3840  for(uint32_t j = i + 1; j < count; ++j)
3841  {
3842  if(iPtr == arr[j])
3843  {
3844  return false;
3845  }
3846  }
3847  }
3848  return true;
3849 }
3850 
3852 // Memory allocation
3853 
3854 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3855 {
3856  if((pAllocationCallbacks != VMA_NULL) &&
3857  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3858  {
3859  return (*pAllocationCallbacks->pfnAllocation)(
3860  pAllocationCallbacks->pUserData,
3861  size,
3862  alignment,
3863  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3864  }
3865  else
3866  {
3867  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3868  }
3869 }
3870 
3871 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3872 {
3873  if((pAllocationCallbacks != VMA_NULL) &&
3874  (pAllocationCallbacks->pfnFree != VMA_NULL))
3875  {
3876  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3877  }
3878  else
3879  {
3880  VMA_SYSTEM_FREE(ptr);
3881  }
3882 }
3883 
3884 template<typename T>
3885 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3886 {
3887  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3888 }
3889 
3890 template<typename T>
3891 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3892 {
3893  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3894 }
3895 
3896 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3897 
3898 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3899 
3900 template<typename T>
3901 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3902 {
3903  ptr->~T();
3904  VmaFree(pAllocationCallbacks, ptr);
3905 }
3906 
3907 template<typename T>
3908 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3909 {
3910  if(ptr != VMA_NULL)
3911  {
3912  for(size_t i = count; i--; )
3913  {
3914  ptr[i].~T();
3915  }
3916  VmaFree(pAllocationCallbacks, ptr);
3917  }
3918 }
3919 
3920 // STL-compatible allocator.
3921 template<typename T>
3922 class VmaStlAllocator
3923 {
3924 public:
3925  const VkAllocationCallbacks* const m_pCallbacks;
3926  typedef T value_type;
3927 
3928  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3929  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3930 
3931  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3932  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3933 
3934  template<typename U>
3935  bool operator==(const VmaStlAllocator<U>& rhs) const
3936  {
3937  return m_pCallbacks == rhs.m_pCallbacks;
3938  }
3939  template<typename U>
3940  bool operator!=(const VmaStlAllocator<U>& rhs) const
3941  {
3942  return m_pCallbacks != rhs.m_pCallbacks;
3943  }
3944 
3945  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3946 };
3947 
3948 #if VMA_USE_STL_VECTOR
3949 
3950 #define VmaVector std::vector
3951 
3952 template<typename T, typename allocatorT>
3953 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3954 {
3955  vec.insert(vec.begin() + index, item);
3956 }
3957 
3958 template<typename T, typename allocatorT>
3959 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3960 {
3961  vec.erase(vec.begin() + index);
3962 }
3963 
3964 #else // #if VMA_USE_STL_VECTOR
3965 
3966 /* Class with an interface compatible with a subset of std::vector.
3967 T must be a POD type, because constructors and destructors are not called and memcpy is
3968 used for these objects. */
3969 template<typename T, typename AllocatorT>
3970 class VmaVector
3971 {
3972 public:
3973  typedef T value_type;
3974 
3975  VmaVector(const AllocatorT& allocator) :
3976  m_Allocator(allocator),
3977  m_pArray(VMA_NULL),
3978  m_Count(0),
3979  m_Capacity(0)
3980  {
3981  }
3982 
3983  VmaVector(size_t count, const AllocatorT& allocator) :
3984  m_Allocator(allocator),
3985  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3986  m_Count(count),
3987  m_Capacity(count)
3988  {
3989  }
3990 
3991  VmaVector(const VmaVector<T, AllocatorT>& src) :
3992  m_Allocator(src.m_Allocator),
3993  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3994  m_Count(src.m_Count),
3995  m_Capacity(src.m_Count)
3996  {
3997  if(m_Count != 0)
3998  {
3999  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4000  }
4001  }
4002 
4003  ~VmaVector()
4004  {
4005  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4006  }
4007 
4008  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4009  {
4010  if(&rhs != this)
4011  {
4012  resize(rhs.m_Count);
4013  if(m_Count != 0)
4014  {
4015  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4016  }
4017  }
4018  return *this;
4019  }
4020 
4021  bool empty() const { return m_Count == 0; }
4022  size_t size() const { return m_Count; }
4023  T* data() { return m_pArray; }
4024  const T* data() const { return m_pArray; }
4025 
4026  T& operator[](size_t index)
4027  {
4028  VMA_HEAVY_ASSERT(index < m_Count);
4029  return m_pArray[index];
4030  }
4031  const T& operator[](size_t index) const
4032  {
4033  VMA_HEAVY_ASSERT(index < m_Count);
4034  return m_pArray[index];
4035  }
4036 
4037  T& front()
4038  {
4039  VMA_HEAVY_ASSERT(m_Count > 0);
4040  return m_pArray[0];
4041  }
4042  const T& front() const
4043  {
4044  VMA_HEAVY_ASSERT(m_Count > 0);
4045  return m_pArray[0];
4046  }
4047  T& back()
4048  {
4049  VMA_HEAVY_ASSERT(m_Count > 0);
4050  return m_pArray[m_Count - 1];
4051  }
4052  const T& back() const
4053  {
4054  VMA_HEAVY_ASSERT(m_Count > 0);
4055  return m_pArray[m_Count - 1];
4056  }
4057 
4058  void reserve(size_t newCapacity, bool freeMemory = false)
4059  {
4060  newCapacity = VMA_MAX(newCapacity, m_Count);
4061 
4062  if((newCapacity < m_Capacity) && !freeMemory)
4063  {
4064  newCapacity = m_Capacity;
4065  }
4066 
4067  if(newCapacity != m_Capacity)
4068  {
4069  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4070  if(m_Count != 0)
4071  {
4072  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4073  }
4074  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4075  m_Capacity = newCapacity;
4076  m_pArray = newArray;
4077  }
4078  }
4079 
4080  void resize(size_t newCount, bool freeMemory = false)
4081  {
4082  size_t newCapacity = m_Capacity;
4083  if(newCount > m_Capacity)
4084  {
4085  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4086  }
4087  else if(freeMemory)
4088  {
4089  newCapacity = newCount;
4090  }
4091 
4092  if(newCapacity != m_Capacity)
4093  {
4094  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4095  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4096  if(elementsToCopy != 0)
4097  {
4098  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4099  }
4100  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4101  m_Capacity = newCapacity;
4102  m_pArray = newArray;
4103  }
4104 
4105  m_Count = newCount;
4106  }
4107 
4108  void clear(bool freeMemory = false)
4109  {
4110  resize(0, freeMemory);
4111  }
4112 
4113  void insert(size_t index, const T& src)
4114  {
4115  VMA_HEAVY_ASSERT(index <= m_Count);
4116  const size_t oldCount = size();
4117  resize(oldCount + 1);
4118  if(index < oldCount)
4119  {
4120  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4121  }
4122  m_pArray[index] = src;
4123  }
4124 
4125  void remove(size_t index)
4126  {
4127  VMA_HEAVY_ASSERT(index < m_Count);
4128  const size_t oldCount = size();
4129  if(index < oldCount - 1)
4130  {
4131  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4132  }
4133  resize(oldCount - 1);
4134  }
4135 
4136  void push_back(const T& src)
4137  {
4138  const size_t newIndex = size();
4139  resize(newIndex + 1);
4140  m_pArray[newIndex] = src;
4141  }
4142 
4143  void pop_back()
4144  {
4145  VMA_HEAVY_ASSERT(m_Count > 0);
4146  resize(size() - 1);
4147  }
4148 
4149  void push_front(const T& src)
4150  {
4151  insert(0, src);
4152  }
4153 
4154  void pop_front()
4155  {
4156  VMA_HEAVY_ASSERT(m_Count > 0);
4157  remove(0);
4158  }
4159 
4160  typedef T* iterator;
4161 
4162  iterator begin() { return m_pArray; }
4163  iterator end() { return m_pArray + m_Count; }
4164 
4165 private:
4166  AllocatorT m_Allocator;
4167  T* m_pArray;
4168  size_t m_Count;
4169  size_t m_Capacity;
4170 };
4171 
4172 template<typename T, typename allocatorT>
4173 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4174 {
4175  vec.insert(index, item);
4176 }
4177 
4178 template<typename T, typename allocatorT>
4179 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4180 {
4181  vec.remove(index);
4182 }
4183 
4184 #endif // #if VMA_USE_STL_VECTOR
4185 
4186 template<typename CmpLess, typename VectorT>
4187 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4188 {
4189  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4190  vector.data(),
4191  vector.data() + vector.size(),
4192  value,
4193  CmpLess()) - vector.data();
4194  VmaVectorInsert(vector, indexToInsert, value);
4195  return indexToInsert;
4196 }
4197 
4198 template<typename CmpLess, typename VectorT>
4199 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4200 {
4201  CmpLess comparator;
4202  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4203  vector.begin(),
4204  vector.end(),
4205  value,
4206  comparator);
4207  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4208  {
4209  size_t indexToRemove = it - vector.begin();
4210  VmaVectorRemove(vector, indexToRemove);
4211  return true;
4212  }
4213  return false;
4214 }
4215 
4216 template<typename CmpLess, typename IterT, typename KeyT>
4217 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4218 {
4219  CmpLess comparator;
4220  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4221  beg, end, value, comparator);
4222  if(it == end ||
4223  (!comparator(*it, value) && !comparator(value, *it)))
4224  {
4225  return it;
4226  }
4227  return end;
4228 }
4229 
4231 // class VmaPoolAllocator
4232 
4233 /*
4234 Allocator for objects of type T using a list of arrays (pools) to speed up
4235 allocation. The number of elements that can be allocated is not bounded, because
4236 the allocator can create multiple blocks.
4237 */
4238 template<typename T>
4239 class VmaPoolAllocator
4240 {
4241  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4242 public:
4243  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4244  ~VmaPoolAllocator();
4245  void Clear();
4246  T* Alloc();
4247  void Free(T* ptr);
4248 
4249 private:
4250  union Item
4251  {
4252  uint32_t NextFreeIndex;
4253  T Value;
4254  };
4255 
4256  struct ItemBlock
4257  {
4258  Item* pItems;
4259  uint32_t Capacity;
4260  uint32_t FirstFreeIndex;
4261  };
4262 
4263  const VkAllocationCallbacks* m_pAllocationCallbacks;
4264  const uint32_t m_FirstBlockCapacity;
4265  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4266 
4267  ItemBlock& CreateNewBlock();
4268 };
4269 
4270 template<typename T>
4271 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4272  m_pAllocationCallbacks(pAllocationCallbacks),
4273  m_FirstBlockCapacity(firstBlockCapacity),
4274  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4275 {
4276  VMA_ASSERT(m_FirstBlockCapacity > 1);
4277 }
4278 
4279 template<typename T>
4280 VmaPoolAllocator<T>::~VmaPoolAllocator()
4281 {
4282  Clear();
4283 }
4284 
4285 template<typename T>
4286 void VmaPoolAllocator<T>::Clear()
4287 {
4288  for(size_t i = m_ItemBlocks.size(); i--; )
4289  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4290  m_ItemBlocks.clear();
4291 }
4292 
4293 template<typename T>
4294 T* VmaPoolAllocator<T>::Alloc()
4295 {
4296  for(size_t i = m_ItemBlocks.size(); i--; )
4297  {
4298  ItemBlock& block = m_ItemBlocks[i];
4299  // This block has some free items: Use first one.
4300  if(block.FirstFreeIndex != UINT32_MAX)
4301  {
4302  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4303  block.FirstFreeIndex = pItem->NextFreeIndex;
4304  return &pItem->Value;
4305  }
4306  }
4307 
4308  // No block has free item: Create new one and use it.
4309  ItemBlock& newBlock = CreateNewBlock();
4310  Item* const pItem = &newBlock.pItems[0];
4311  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4312  return &pItem->Value;
4313 }
4314 
4315 template<typename T>
4316 void VmaPoolAllocator<T>::Free(T* ptr)
4317 {
4318  // Search all memory blocks to find ptr.
4319  for(size_t i = m_ItemBlocks.size(); i--; )
4320  {
4321  ItemBlock& block = m_ItemBlocks[i];
4322 
4323  // Cast the incoming pointer to the union type via memcpy.
4324  Item* pItemPtr;
4325  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4326 
4327  // Check if pItemPtr is in address range of this block.
4328  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4329  {
4330  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4331  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4332  block.FirstFreeIndex = index;
4333  return;
4334  }
4335  }
4336  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4337 }
4338 
4339 template<typename T>
4340 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4341 {
4342  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4343  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4344 
4345  const ItemBlock newBlock = {
4346  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4347  newBlockCapacity,
4348  0 };
4349 
4350  m_ItemBlocks.push_back(newBlock);
4351 
4352  // Set up a singly-linked list of all free items in this block.
4353  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4354  newBlock.pItems[i].NextFreeIndex = i + 1;
4355  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4356  return m_ItemBlocks.back();
4357 }
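/*
Example (illustrative sketch): block capacities grow by a factor of 3/2, so with
firstBlockCapacity = 32 successive blocks hold 32, 48, 72, ... items. Passing
VMA_NULL as the allocation callbacks falls back to the system aligned allocator.

    VmaPoolAllocator<uint64_t> pool(VMA_NULL, 32);
    uint64_t* item = pool.Alloc();
    *item = 123;
    pool.Free(item);
*/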
4358 
4360 // class VmaRawList, VmaList
4361 
4362 #if VMA_USE_STL_LIST
4363 
4364 #define VmaList std::list
4365 
4366 #else // #if VMA_USE_STL_LIST
4367 
4368 template<typename T>
4369 struct VmaListItem
4370 {
4371  VmaListItem* pPrev;
4372  VmaListItem* pNext;
4373  T Value;
4374 };
4375 
4376 // Doubly linked list.
4377 template<typename T>
4378 class VmaRawList
4379 {
4380  VMA_CLASS_NO_COPY(VmaRawList)
4381 public:
4382  typedef VmaListItem<T> ItemType;
4383 
4384  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4385  ~VmaRawList();
4386  void Clear();
4387 
4388  size_t GetCount() const { return m_Count; }
4389  bool IsEmpty() const { return m_Count == 0; }
4390 
4391  ItemType* Front() { return m_pFront; }
4392  const ItemType* Front() const { return m_pFront; }
4393  ItemType* Back() { return m_pBack; }
4394  const ItemType* Back() const { return m_pBack; }
4395 
4396  ItemType* PushBack();
4397  ItemType* PushFront();
4398  ItemType* PushBack(const T& value);
4399  ItemType* PushFront(const T& value);
4400  void PopBack();
4401  void PopFront();
4402 
4403  // pItem can be null, in which case this behaves like PushBack().
4404  ItemType* InsertBefore(ItemType* pItem);
4405  // pItem can be null, in which case this behaves like PushFront().
4406  ItemType* InsertAfter(ItemType* pItem);
4407 
4408  ItemType* InsertBefore(ItemType* pItem, const T& value);
4409  ItemType* InsertAfter(ItemType* pItem, const T& value);
4410 
4411  void Remove(ItemType* pItem);
4412 
4413 private:
4414  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4415  VmaPoolAllocator<ItemType> m_ItemAllocator;
4416  ItemType* m_pFront;
4417  ItemType* m_pBack;
4418  size_t m_Count;
4419 };
4420 
4421 template<typename T>
4422 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4423  m_pAllocationCallbacks(pAllocationCallbacks),
4424  m_ItemAllocator(pAllocationCallbacks, 128),
4425  m_pFront(VMA_NULL),
4426  m_pBack(VMA_NULL),
4427  m_Count(0)
4428 {
4429 }
4430 
4431 template<typename T>
4432 VmaRawList<T>::~VmaRawList()
4433 {
4434  // Intentionally not calling Clear, because that would waste computation
4435  // returning all items to m_ItemAllocator as free.
4436 }
4437 
4438 template<typename T>
4439 void VmaRawList<T>::Clear()
4440 {
4441  if(IsEmpty() == false)
4442  {
4443  ItemType* pItem = m_pBack;
4444  while(pItem != VMA_NULL)
4445  {
4446  ItemType* const pPrevItem = pItem->pPrev;
4447  m_ItemAllocator.Free(pItem);
4448  pItem = pPrevItem;
4449  }
4450  m_pFront = VMA_NULL;
4451  m_pBack = VMA_NULL;
4452  m_Count = 0;
4453  }
4454 }
4455 
4456 template<typename T>
4457 VmaListItem<T>* VmaRawList<T>::PushBack()
4458 {
4459  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4460  pNewItem->pNext = VMA_NULL;
4461  if(IsEmpty())
4462  {
4463  pNewItem->pPrev = VMA_NULL;
4464  m_pFront = pNewItem;
4465  m_pBack = pNewItem;
4466  m_Count = 1;
4467  }
4468  else
4469  {
4470  pNewItem->pPrev = m_pBack;
4471  m_pBack->pNext = pNewItem;
4472  m_pBack = pNewItem;
4473  ++m_Count;
4474  }
4475  return pNewItem;
4476 }
4477 
4478 template<typename T>
4479 VmaListItem<T>* VmaRawList<T>::PushFront()
4480 {
4481  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4482  pNewItem->pPrev = VMA_NULL;
4483  if(IsEmpty())
4484  {
4485  pNewItem->pNext = VMA_NULL;
4486  m_pFront = pNewItem;
4487  m_pBack = pNewItem;
4488  m_Count = 1;
4489  }
4490  else
4491  {
4492  pNewItem->pNext = m_pFront;
4493  m_pFront->pPrev = pNewItem;
4494  m_pFront = pNewItem;
4495  ++m_Count;
4496  }
4497  return pNewItem;
4498 }
4499 
4500 template<typename T>
4501 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4502 {
4503  ItemType* const pNewItem = PushBack();
4504  pNewItem->Value = value;
4505  return pNewItem;
4506 }
4507 
4508 template<typename T>
4509 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4510 {
4511  ItemType* const pNewItem = PushFront();
4512  pNewItem->Value = value;
4513  return pNewItem;
4514 }
4515 
4516 template<typename T>
4517 void VmaRawList<T>::PopBack()
4518 {
4519  VMA_HEAVY_ASSERT(m_Count > 0);
4520  ItemType* const pBackItem = m_pBack;
4521  ItemType* const pPrevItem = pBackItem->pPrev;
4522  if(pPrevItem != VMA_NULL)
4523  {
4524  pPrevItem->pNext = VMA_NULL;
4525  }
4526  m_pBack = pPrevItem;
4527  m_ItemAllocator.Free(pBackItem);
4528  --m_Count;
4529 }
4530 
4531 template<typename T>
4532 void VmaRawList<T>::PopFront()
4533 {
4534  VMA_HEAVY_ASSERT(m_Count > 0);
4535  ItemType* const pFrontItem = m_pFront;
4536  ItemType* const pNextItem = pFrontItem->pNext;
4537  if(pNextItem != VMA_NULL)
4538  {
4539  pNextItem->pPrev = VMA_NULL;
4540  }
4541  m_pFront = pNextItem;
4542  m_ItemAllocator.Free(pFrontItem);
4543  --m_Count;
4544 }
4545 
4546 template<typename T>
4547 void VmaRawList<T>::Remove(ItemType* pItem)
4548 {
4549  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4550  VMA_HEAVY_ASSERT(m_Count > 0);
4551 
4552  if(pItem->pPrev != VMA_NULL)
4553  {
4554  pItem->pPrev->pNext = pItem->pNext;
4555  }
4556  else
4557  {
4558  VMA_HEAVY_ASSERT(m_pFront == pItem);
4559  m_pFront = pItem->pNext;
4560  }
4561 
4562  if(pItem->pNext != VMA_NULL)
4563  {
4564  pItem->pNext->pPrev = pItem->pPrev;
4565  }
4566  else
4567  {
4568  VMA_HEAVY_ASSERT(m_pBack == pItem);
4569  m_pBack = pItem->pPrev;
4570  }
4571 
4572  m_ItemAllocator.Free(pItem);
4573  --m_Count;
4574 }
4575 
4576 template<typename T>
4577 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4578 {
4579  if(pItem != VMA_NULL)
4580  {
4581  ItemType* const prevItem = pItem->pPrev;
4582  ItemType* const newItem = m_ItemAllocator.Alloc();
4583  newItem->pPrev = prevItem;
4584  newItem->pNext = pItem;
4585  pItem->pPrev = newItem;
4586  if(prevItem != VMA_NULL)
4587  {
4588  prevItem->pNext = newItem;
4589  }
4590  else
4591  {
4592  VMA_HEAVY_ASSERT(m_pFront == pItem);
4593  m_pFront = newItem;
4594  }
4595  ++m_Count;
4596  return newItem;
4597  }
4598  else
4599  return PushBack();
4600 }
4601 
4602 template<typename T>
4603 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4604 {
4605  if(pItem != VMA_NULL)
4606  {
4607  ItemType* const nextItem = pItem->pNext;
4608  ItemType* const newItem = m_ItemAllocator.Alloc();
4609  newItem->pNext = nextItem;
4610  newItem->pPrev = pItem;
4611  pItem->pNext = newItem;
4612  if(nextItem != VMA_NULL)
4613  {
4614  nextItem->pPrev = newItem;
4615  }
4616  else
4617  {
4618  VMA_HEAVY_ASSERT(m_pBack == pItem);
4619  m_pBack = newItem;
4620  }
4621  ++m_Count;
4622  return newItem;
4623  }
4624  else
4625  return PushFront();
4626 }
4627 
4628 template<typename T>
4629 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4630 {
4631  ItemType* const newItem = InsertBefore(pItem);
4632  newItem->Value = value;
4633  return newItem;
4634 }
4635 
4636 template<typename T>
4637 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4638 {
4639  ItemType* const newItem = InsertAfter(pItem);
4640  newItem->Value = value;
4641  return newItem;
4642 }
4643 
4644 template<typename T, typename AllocatorT>
4645 class VmaList
4646 {
4647  VMA_CLASS_NO_COPY(VmaList)
4648 public:
4649  class iterator
4650  {
4651  public:
4652  iterator() :
4653  m_pList(VMA_NULL),
4654  m_pItem(VMA_NULL)
4655  {
4656  }
4657 
4658  T& operator*() const
4659  {
4660  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4661  return m_pItem->Value;
4662  }
4663  T* operator->() const
4664  {
4665  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4666  return &m_pItem->Value;
4667  }
4668 
4669  iterator& operator++()
4670  {
4671  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4672  m_pItem = m_pItem->pNext;
4673  return *this;
4674  }
4675  iterator& operator--()
4676  {
4677  if(m_pItem != VMA_NULL)
4678  {
4679  m_pItem = m_pItem->pPrev;
4680  }
4681  else
4682  {
4683  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4684  m_pItem = m_pList->Back();
4685  }
4686  return *this;
4687  }
4688 
4689  iterator operator++(int)
4690  {
4691  iterator result = *this;
4692  ++*this;
4693  return result;
4694  }
4695  iterator operator--(int)
4696  {
4697  iterator result = *this;
4698  --*this;
4699  return result;
4700  }
4701 
4702  bool operator==(const iterator& rhs) const
4703  {
4704  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4705  return m_pItem == rhs.m_pItem;
4706  }
4707  bool operator!=(const iterator& rhs) const
4708  {
4709  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4710  return m_pItem != rhs.m_pItem;
4711  }
4712 
4713  private:
4714  VmaRawList<T>* m_pList;
4715  VmaListItem<T>* m_pItem;
4716 
4717  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4718  m_pList(pList),
4719  m_pItem(pItem)
4720  {
4721  }
4722 
4723  friend class VmaList<T, AllocatorT>;
4724  };
4725 
4726  class const_iterator
4727  {
4728  public:
4729  const_iterator() :
4730  m_pList(VMA_NULL),
4731  m_pItem(VMA_NULL)
4732  {
4733  }
4734 
4735  const_iterator(const iterator& src) :
4736  m_pList(src.m_pList),
4737  m_pItem(src.m_pItem)
4738  {
4739  }
4740 
4741  const T& operator*() const
4742  {
4743  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4744  return m_pItem->Value;
4745  }
4746  const T* operator->() const
4747  {
4748  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4749  return &m_pItem->Value;
4750  }
4751 
4752  const_iterator& operator++()
4753  {
4754  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4755  m_pItem = m_pItem->pNext;
4756  return *this;
4757  }
4758  const_iterator& operator--()
4759  {
4760  if(m_pItem != VMA_NULL)
4761  {
4762  m_pItem = m_pItem->pPrev;
4763  }
4764  else
4765  {
4766  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4767  m_pItem = m_pList->Back();
4768  }
4769  return *this;
4770  }
4771 
4772  const_iterator operator++(int)
4773  {
4774  const_iterator result = *this;
4775  ++*this;
4776  return result;
4777  }
4778  const_iterator operator--(int)
4779  {
4780  const_iterator result = *this;
4781  --*this;
4782  return result;
4783  }
4784 
4785  bool operator==(const const_iterator& rhs) const
4786  {
4787  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4788  return m_pItem == rhs.m_pItem;
4789  }
4790  bool operator!=(const const_iterator& rhs) const
4791  {
4792  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4793  return m_pItem != rhs.m_pItem;
4794  }
4795 
4796  private:
4797  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4798  m_pList(pList),
4799  m_pItem(pItem)
4800  {
4801  }
4802 
4803  const VmaRawList<T>* m_pList;
4804  const VmaListItem<T>* m_pItem;
4805 
4806  friend class VmaList<T, AllocatorT>;
4807  };
4808 
4809  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4810 
4811  bool empty() const { return m_RawList.IsEmpty(); }
4812  size_t size() const { return m_RawList.GetCount(); }
4813 
4814  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4815  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4816 
4817  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4818  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4819 
4820  void clear() { m_RawList.Clear(); }
4821  void push_back(const T& value) { m_RawList.PushBack(value); }
4822  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4823  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4824 
4825 private:
4826  VmaRawList<T> m_RawList;
4827 };
4828 
4829 #endif // #if VMA_USE_STL_LIST
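// A minimal usage sketch for VmaList (illustration only, not part of the
// library). It assumes VmaStlAllocator is constructible from a
// const VkAllocationCallbacks* as elsewhere in this file. Note that
// decrementing end() is well defined here: iterator::operator--() detects the
// null item and jumps to the back of the underlying VmaRawList.
#if 0
static void ExampleVmaListUsage(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList< int, VmaStlAllocator<int> > IntList;
    VmaStlAllocator<int> alloc(pCallbacks);
    IntList list(alloc);
    list.push_back(1);
    list.push_back(3);
    IntList::iterator it = list.begin();
    ++it;                // Points to 3.
    list.insert(it, 2);  // Inserts before `it`: list is now 1, 2, 3.
    IntList::iterator last = list.end();
    --last;              // Valid: steps back from end() to the last element, 3.
    list.erase(last);    // List is now 1, 2.
}
#endif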
4830 
4832 // class VmaMap
4833 
4834 // Unused in this version.
4835 #if 0
4836 
4837 #if VMA_USE_STL_UNORDERED_MAP
4838 
4839 #define VmaPair std::pair
4840 
4841 #define VMA_MAP_TYPE(KeyT, ValueT) \
4842  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4843 
4844 #else // #if VMA_USE_STL_UNORDERED_MAP
4845 
4846 template<typename T1, typename T2>
4847 struct VmaPair
4848 {
4849  T1 first;
4850  T2 second;
4851 
4852  VmaPair() : first(), second() { }
4853  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4854 };
4855 
4856 /* Class compatible with a subset of the interface of std::unordered_map.
4857 KeyT and ValueT must be POD because they will be stored in a VmaVector.
4858 */
4859 template<typename KeyT, typename ValueT>
4860 class VmaMap
4861 {
4862 public:
4863  typedef VmaPair<KeyT, ValueT> PairType;
4864  typedef PairType* iterator;
4865 
4866  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4867 
4868  iterator begin() { return m_Vector.begin(); }
4869  iterator end() { return m_Vector.end(); }
4870 
4871  void insert(const PairType& pair);
4872  iterator find(const KeyT& key);
4873  void erase(iterator it);
4874 
4875 private:
4876  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4877 };
4878 
4879 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4880 
4881 template<typename FirstT, typename SecondT>
4882 struct VmaPairFirstLess
4883 {
4884  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4885  {
4886  return lhs.first < rhs.first;
4887  }
4888  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4889  {
4890  return lhs.first < rhsFirst;
4891  }
4892 };
4893 
4894 template<typename KeyT, typename ValueT>
4895 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4896 {
4897  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4898  m_Vector.data(),
4899  m_Vector.data() + m_Vector.size(),
4900  pair,
4901  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4902  VmaVectorInsert(m_Vector, indexToInsert, pair);
4903 }
4904 
4905 template<typename KeyT, typename ValueT>
4906 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4907 {
4908  PairType* it = VmaBinaryFindFirstNotLess(
4909  m_Vector.data(),
4910  m_Vector.data() + m_Vector.size(),
4911  key,
4912  VmaPairFirstLess<KeyT, ValueT>());
4913  if((it != m_Vector.end()) && (it->first == key))
4914  {
4915  return it;
4916  }
4917  else
4918  {
4919  return m_Vector.end();
4920  }
4921 }
4922 
4923 template<typename KeyT, typename ValueT>
4924 void VmaMap<KeyT, ValueT>::erase(iterator it)
4925 {
4926  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4927 }
4928 
4929 #endif // #if VMA_USE_STL_UNORDERED_MAP
4930 
4931 #endif // #if 0
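// Although disabled, the VmaMap above illustrates a common pattern: a vector
// kept sorted by key, searched with a lower_bound-style binary search
// (VmaBinaryFindFirstNotLess). A standard-library sketch of the same lookup,
// for illustration only (hypothetical helper, not part of the library):
#if 0
#include <algorithm>
#include <vector>
template<typename KeyT, typename ValueT>
std::pair<KeyT, ValueT>* ExampleFindInSortedVector(
    std::vector< std::pair<KeyT, ValueT> >& vec, const KeyT& key)
{
    // Comparator matching VmaPairFirstLess: orders pairs by their first member.
    typename std::vector< std::pair<KeyT, ValueT> >::iterator it = std::lower_bound(
        vec.begin(), vec.end(), key,
        [](const std::pair<KeyT, ValueT>& lhs, const KeyT& rhsKey) { return lhs.first < rhsKey; });
    if(it != vec.end() && it->first == key)
    {
        return &*it;
    }
    return VMA_NULL; // Not found - analogous to returning m_Vector.end() above.
}
#endif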
4932 
4934 
4935 class VmaDeviceMemoryBlock;
4936 
4937 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4938 
4939 struct VmaAllocation_T
4940 {
4941 private:
4942  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4943 
4944  enum FLAGS
4945  {
4946  FLAG_USER_DATA_STRING = 0x01,
4947  };
4948 
4949 public:
4950  enum ALLOCATION_TYPE
4951  {
4952  ALLOCATION_TYPE_NONE,
4953  ALLOCATION_TYPE_BLOCK,
4954  ALLOCATION_TYPE_DEDICATED,
4955  };
4956 
4957  /*
4958  This struct cannot have a constructor or destructor. It must be POD because it is
4959  allocated using VmaPoolAllocator.
4960  */
4961 
4962  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4963  {
4964  m_Alignment = 1;
4965  m_Size = 0;
4966  m_pUserData = VMA_NULL;
4967  m_LastUseFrameIndex = currentFrameIndex;
4968  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4969  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4970  m_MapCount = 0;
4971  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4972 
4973 #if VMA_STATS_STRING_ENABLED
4974  m_CreationFrameIndex = currentFrameIndex;
4975  m_BufferImageUsage = 0;
4976 #endif
4977  }
4978 
4979  void Dtor()
4980  {
4981  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4982 
4983  // Check if owned string was freed.
4984  VMA_ASSERT(m_pUserData == VMA_NULL);
4985  }
4986 
4987  void InitBlockAllocation(
4988  VmaDeviceMemoryBlock* block,
4989  VkDeviceSize offset,
4990  VkDeviceSize alignment,
4991  VkDeviceSize size,
4992  VmaSuballocationType suballocationType,
4993  bool mapped,
4994  bool canBecomeLost)
4995  {
4996  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4997  VMA_ASSERT(block != VMA_NULL);
4998  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4999  m_Alignment = alignment;
5000  m_Size = size;
5001  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5002  m_SuballocationType = (uint8_t)suballocationType;
5003  m_BlockAllocation.m_Block = block;
5004  m_BlockAllocation.m_Offset = offset;
5005  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5006  }
5007 
5008  void InitLost()
5009  {
5010  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5011  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5012  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5013  m_BlockAllocation.m_Block = VMA_NULL;
5014  m_BlockAllocation.m_Offset = 0;
5015  m_BlockAllocation.m_CanBecomeLost = true;
5016  }
5017 
5018  void ChangeBlockAllocation(
5019  VmaAllocator hAllocator,
5020  VmaDeviceMemoryBlock* block,
5021  VkDeviceSize offset);
5022 
5023  void ChangeSize(VkDeviceSize newSize);
5024  void ChangeOffset(VkDeviceSize newOffset);
5025 
5026  // A non-null pMappedData means the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5027  void InitDedicatedAllocation(
5028  uint32_t memoryTypeIndex,
5029  VkDeviceMemory hMemory,
5030  VmaSuballocationType suballocationType,
5031  void* pMappedData,
5032  VkDeviceSize size)
5033  {
5034  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5035  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5036  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5037  m_Alignment = 0;
5038  m_Size = size;
5039  m_SuballocationType = (uint8_t)suballocationType;
5040  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5041  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5042  m_DedicatedAllocation.m_hMemory = hMemory;
5043  m_DedicatedAllocation.m_pMappedData = pMappedData;
5044  }
5045 
5046  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5047  VkDeviceSize GetAlignment() const { return m_Alignment; }
5048  VkDeviceSize GetSize() const { return m_Size; }
5049  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5050  void* GetUserData() const { return m_pUserData; }
5051  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5052  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5053 
5054  VmaDeviceMemoryBlock* GetBlock() const
5055  {
5056  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5057  return m_BlockAllocation.m_Block;
5058  }
5059  VkDeviceSize GetOffset() const;
5060  VkDeviceMemory GetMemory() const;
5061  uint32_t GetMemoryTypeIndex() const;
5062  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5063  void* GetMappedData() const;
5064  bool CanBecomeLost() const;
5065 
5066  uint32_t GetLastUseFrameIndex() const
5067  {
5068  return m_LastUseFrameIndex.load();
5069  }
5070  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5071  {
5072  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5073  }
5074  /*
5075  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5076  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5077  - Else, returns false.
5078 
5079  If hAllocation is already lost, this function asserts - you should not call it then.
5080  If hAllocation was not created with CAN_BECOME_LOST_BIT, it also asserts.
5081  */
5082  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5083 
5084  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5085  {
5086  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5087  outInfo.blockCount = 1;
5088  outInfo.allocationCount = 1;
5089  outInfo.unusedRangeCount = 0;
5090  outInfo.usedBytes = m_Size;
5091  outInfo.unusedBytes = 0;
5092  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5093  outInfo.unusedRangeSizeMin = UINT64_MAX;
5094  outInfo.unusedRangeSizeMax = 0;
5095  }
5096 
5097  void BlockAllocMap();
5098  void BlockAllocUnmap();
5099  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5100  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5101 
5102 #if VMA_STATS_STRING_ENABLED
5103  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5104  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5105 
5106  void InitBufferImageUsage(uint32_t bufferImageUsage)
5107  {
5108  VMA_ASSERT(m_BufferImageUsage == 0);
5109  m_BufferImageUsage = bufferImageUsage;
5110  }
5111 
5112  void PrintParameters(class VmaJsonWriter& json) const;
5113 #endif
5114 
5115 private:
5116  VkDeviceSize m_Alignment;
5117  VkDeviceSize m_Size;
5118  void* m_pUserData;
5119  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5120  uint8_t m_Type; // ALLOCATION_TYPE
5121  uint8_t m_SuballocationType; // VmaSuballocationType
5122  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5123  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5124  uint8_t m_MapCount;
5125  uint8_t m_Flags; // enum FLAGS
5126 
5127  // Allocation out of VmaDeviceMemoryBlock.
5128  struct BlockAllocation
5129  {
5130  VmaDeviceMemoryBlock* m_Block;
5131  VkDeviceSize m_Offset;
5132  bool m_CanBecomeLost;
5133  };
5134 
5135  // Allocation for an object that has its own private VkDeviceMemory.
5136  struct DedicatedAllocation
5137  {
5138  uint32_t m_MemoryTypeIndex;
5139  VkDeviceMemory m_hMemory;
5140  void* m_pMappedData; // Not null means memory is mapped.
5141  };
5142 
5143  union
5144  {
5145  // Allocation out of VmaDeviceMemoryBlock.
5146  BlockAllocation m_BlockAllocation;
5147  // Allocation for an object that has its own private VkDeviceMemory.
5148  DedicatedAllocation m_DedicatedAllocation;
5149  };
5150 
5151 #if VMA_STATS_STRING_ENABLED
5152  uint32_t m_CreationFrameIndex;
5153  uint32_t m_BufferImageUsage; // 0 if unknown.
5154 #endif
5155 
5156  void FreeUserDataString(VmaAllocator hAllocator);
5157 };
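// For illustration only: how the m_MapCount byte above packs two pieces of
// state. These helpers are hypothetical, not part of the library.
#if 0
static bool ExampleIsPersistentlyMapped(uint8_t mapCount)
{
    // Bit 0x80 (MAP_COUNT_FLAG_PERSISTENT_MAP) marks allocations created with
    // VMA_ALLOCATION_CREATE_MAPPED_BIT.
    return (mapCount & 0x80) != 0;
}
static uint8_t ExampleMapReferenceCount(uint8_t mapCount)
{
    // The low 7 bits count outstanding vmaMapMemory() calls not yet matched by
    // vmaUnmapMemory().
    return (uint8_t)(mapCount & 0x7F);
}
#endif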
5158 
5159 /*
5160 Represents a region of a VmaDeviceMemoryBlock that is either free, or assigned
5161 and returned to the user as an allocated memory block.
5162 */
5163 struct VmaSuballocation
5164 {
5165  VkDeviceSize offset;
5166  VkDeviceSize size;
5167  VmaAllocation hAllocation;
5168  VmaSuballocationType type;
5169 };
5170 
5171 // Comparator for offsets.
5172 struct VmaSuballocationOffsetLess
5173 {
5174  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5175  {
5176  return lhs.offset < rhs.offset;
5177  }
5178 };
5179 struct VmaSuballocationOffsetGreater
5180 {
5181  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5182  {
5183  return lhs.offset > rhs.offset;
5184  }
5185 };
5186 
5187 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5188 
5189 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5190 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5191 
5192 enum class VmaAllocationRequestType
5193 {
5194  Normal,
5195  // Used by "Linear" algorithm.
5196  UpperAddress,
5197  EndOf1st,
5198  EndOf2nd,
5199 };
5200 
5201 /*
5202 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5203 
5204 If canMakeOtherLost was false:
5205 - item points to a FREE suballocation.
5206 - itemsToMakeLostCount is 0.
5207 
5208 If canMakeOtherLost was true:
5209 - item points to first of sequence of suballocations, which are either FREE,
5210  or point to VmaAllocations that can become lost.
5211 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5212  the requested allocation to succeed.
5213 */
5214 struct VmaAllocationRequest
5215 {
5216  VkDeviceSize offset;
5217  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5218  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5219  VmaSuballocationList::iterator item;
5220  size_t itemsToMakeLostCount;
5221  void* customData;
5222  VmaAllocationRequestType type;
5223 
5224  VkDeviceSize CalcCost() const
5225  {
5226  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5227  }
5228 };
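// A worked example of CalcCost(), with values chosen only for illustration:
// a request overlapping allocations of total size sumItemSize = 262144 bytes
// (256 KiB) that requires itemsToMakeLostCount = 2 costs
// 262144 + 2 * 1048576 = 2359296. When candidate placements are compared, the
// lowest cost wins, so each allocation that would be made lost is penalized as
// if it wasted VMA_LOST_ALLOCATION_COST additional bytes.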
5229 
5230 /*
5231 Data structure used for bookkeeping of allocations and unused ranges of memory
5232 in a single VkDeviceMemory block.
5233 */
5234 class VmaBlockMetadata
5235 {
5236 public:
5237  VmaBlockMetadata(VmaAllocator hAllocator);
5238  virtual ~VmaBlockMetadata() { }
5239  virtual void Init(VkDeviceSize size) { m_Size = size; }
5240 
5241  // Validates all data structures inside this object. If not valid, returns false.
5242  virtual bool Validate() const = 0;
5243  VkDeviceSize GetSize() const { return m_Size; }
5244  virtual size_t GetAllocationCount() const = 0;
5245  virtual VkDeviceSize GetSumFreeSize() const = 0;
5246  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5247  // Returns true if this block is empty - contains only a single free suballocation.
5248  virtual bool IsEmpty() const = 0;
5249 
5250  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5251  // Shouldn't modify blockCount.
5252  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5253 
5254 #if VMA_STATS_STRING_ENABLED
5255  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5256 #endif
5257 
5258  // Tries to find a place for a suballocation with the given parameters inside this block.
5259  // On success, fills pAllocationRequest and returns true.
5260  // On failure, returns false.
5261  virtual bool CreateAllocationRequest(
5262  uint32_t currentFrameIndex,
5263  uint32_t frameInUseCount,
5264  VkDeviceSize bufferImageGranularity,
5265  VkDeviceSize allocSize,
5266  VkDeviceSize allocAlignment,
5267  bool upperAddress,
5268  VmaSuballocationType allocType,
5269  bool canMakeOtherLost,
5270  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5271  uint32_t strategy,
5272  VmaAllocationRequest* pAllocationRequest) = 0;
5273 
5274  virtual bool MakeRequestedAllocationsLost(
5275  uint32_t currentFrameIndex,
5276  uint32_t frameInUseCount,
5277  VmaAllocationRequest* pAllocationRequest) = 0;
5278 
5279  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5280 
5281  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5282 
5283  // Makes the actual allocation based on the request. The request must already be checked and valid.
5284  virtual void Alloc(
5285  const VmaAllocationRequest& request,
5286  VmaSuballocationType type,
5287  VkDeviceSize allocSize,
5288  VmaAllocation hAllocation) = 0;
5289 
5290  // Frees suballocation assigned to given memory region.
5291  virtual void Free(const VmaAllocation allocation) = 0;
5292  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5293 
5294  // Tries to resize (grow or shrink) space for given allocation, in place.
5295  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5296 
5297 protected:
5298  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5299 
5300 #if VMA_STATS_STRING_ENABLED
5301  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5302  VkDeviceSize unusedBytes,
5303  size_t allocationCount,
5304  size_t unusedRangeCount) const;
5305  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5306  VkDeviceSize offset,
5307  VmaAllocation hAllocation) const;
5308  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5309  VkDeviceSize offset,
5310  VkDeviceSize size) const;
5311  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5312 #endif
5313 
5314 private:
5315  VkDeviceSize m_Size;
5316  const VkAllocationCallbacks* m_pAllocationCallbacks;
5317 };
5318 
5319 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5320  VMA_ASSERT(0 && "Validation failed: " #cond); \
5321  return false; \
5322  } } while(false)
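// For illustration only: VMA_VALIDATE is intended for Validate()
// implementations - it asserts in debug builds and makes the enclosing
// function return false on the first failed condition. ExampleMetadata and
// its members are hypothetical.
#if 0
bool ExampleMetadata::Validate() const
{
    VMA_VALIDATE(m_SumFreeSize <= GetSize());
    VMA_VALIDATE(m_FreeCount <= m_Suballocations.size());
    return true; // All checks passed.
}
#endif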
5323 
5324 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5325 {
5326  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5327 public:
5328  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5329  virtual ~VmaBlockMetadata_Generic();
5330  virtual void Init(VkDeviceSize size);
5331 
5332  virtual bool Validate() const;
5333  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5334  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5335  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5336  virtual bool IsEmpty() const;
5337 
5338  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5339  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5340 
5341 #if VMA_STATS_STRING_ENABLED
5342  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5343 #endif
5344 
5345  virtual bool CreateAllocationRequest(
5346  uint32_t currentFrameIndex,
5347  uint32_t frameInUseCount,
5348  VkDeviceSize bufferImageGranularity,
5349  VkDeviceSize allocSize,
5350  VkDeviceSize allocAlignment,
5351  bool upperAddress,
5352  VmaSuballocationType allocType,
5353  bool canMakeOtherLost,
5354  uint32_t strategy,
5355  VmaAllocationRequest* pAllocationRequest);
5356 
5357  virtual bool MakeRequestedAllocationsLost(
5358  uint32_t currentFrameIndex,
5359  uint32_t frameInUseCount,
5360  VmaAllocationRequest* pAllocationRequest);
5361 
5362  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5363 
5364  virtual VkResult CheckCorruption(const void* pBlockData);
5365 
5366  virtual void Alloc(
5367  const VmaAllocationRequest& request,
5368  VmaSuballocationType type,
5369  VkDeviceSize allocSize,
5370  VmaAllocation hAllocation);
5371 
5372  virtual void Free(const VmaAllocation allocation);
5373  virtual void FreeAtOffset(VkDeviceSize offset);
5374 
5375  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5376 
5378  // For defragmentation
5379 
5380  bool IsBufferImageGranularityConflictPossible(
5381  VkDeviceSize bufferImageGranularity,
5382  VmaSuballocationType& inOutPrevSuballocType) const;
5383 
5384 private:
5385  friend class VmaDefragmentationAlgorithm_Generic;
5386  friend class VmaDefragmentationAlgorithm_Fast;
5387 
5388  uint32_t m_FreeCount;
5389  VkDeviceSize m_SumFreeSize;
5390  VmaSuballocationList m_Suballocations;
5391  // Suballocations that are free and have a size greater than a certain threshold.
5392  // Sorted by size, ascending.
5393  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5394 
5395  bool ValidateFreeSuballocationList() const;
5396 
5397  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
5398  // If yes, fills pOffset and returns true. If no, returns false.
5399  bool CheckAllocation(
5400  uint32_t currentFrameIndex,
5401  uint32_t frameInUseCount,
5402  VkDeviceSize bufferImageGranularity,
5403  VkDeviceSize allocSize,
5404  VkDeviceSize allocAlignment,
5405  VmaSuballocationType allocType,
5406  VmaSuballocationList::const_iterator suballocItem,
5407  bool canMakeOtherLost,
5408  VkDeviceSize* pOffset,
5409  size_t* itemsToMakeLostCount,
5410  VkDeviceSize* pSumFreeSize,
5411  VkDeviceSize* pSumItemSize) const;
5412  // Given a free suballocation, merges it with the following one, which must also be free.
5413  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5414  // Releases the given suballocation, making it free.
5415  // Merges it with adjacent free suballocations if applicable.
5416  // Returns an iterator to the new free suballocation at this place.
5417  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5418  // Given a free suballocation, inserts it into the sorted list
5419  // m_FreeSuballocationsBySize if it is suitable (i.e. large enough).
5420  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5421  // Given a free suballocation, removes it from the sorted list
5422  // m_FreeSuballocationsBySize if it was registered there.
5423  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5424 };
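// For illustration only (hypothetical helper, not the library's actual code):
// because m_FreeSuballocationsBySize is sorted by size ascending, a best-fit
// candidate for a given allocSize is the first entry whose size is not less
// than allocSize - a classic lower_bound over the size key.
#if 0
static VmaSuballocationList::iterator* ExampleFindBestFit(
    VmaSuballocationList::iterator* pBegin,
    VmaSuballocationList::iterator* pEnd,
    VkDeviceSize allocSize)
{
    while(pBegin < pEnd)
    {
        VmaSuballocationList::iterator* const pMid = pBegin + (pEnd - pBegin) / 2;
        if((*pMid)->size < allocSize)
        {
            pBegin = pMid + 1; // Too small - search the upper half.
        }
        else
        {
            pEnd = pMid; // Big enough - but maybe an earlier entry is too.
        }
    }
    return pBegin; // First entry with size >= allocSize, or pEnd if none fits.
}
#endif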
5425 
5426 /*
5427 Allocations and their references in the internal data structure look like this:
5428 
5429 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5430 
5431  0 +-------+
5432  | |
5433  | |
5434  | |
5435  +-------+
5436  | Alloc | 1st[m_1stNullItemsBeginCount]
5437  +-------+
5438  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5439  +-------+
5440  | ... |
5441  +-------+
5442  | Alloc | 1st[1st.size() - 1]
5443  +-------+
5444  | |
5445  | |
5446  | |
5447 GetSize() +-------+
5448 
5449 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5450 
5451  0 +-------+
5452  | Alloc | 2nd[0]
5453  +-------+
5454  | Alloc | 2nd[1]
5455  +-------+
5456  | ... |
5457  +-------+
5458  | Alloc | 2nd[2nd.size() - 1]
5459  +-------+
5460  | |
5461  | |
5462  | |
5463  +-------+
5464  | Alloc | 1st[m_1stNullItemsBeginCount]
5465  +-------+
5466  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5467  +-------+
5468  | ... |
5469  +-------+
5470  | Alloc | 1st[1st.size() - 1]
5471  +-------+
5472  | |
5473 GetSize() +-------+
5474 
5475 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5476 
5477  0 +-------+
5478  | |
5479  | |
5480  | |
5481  +-------+
5482  | Alloc | 1st[m_1stNullItemsBeginCount]
5483  +-------+
5484  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5485  +-------+
5486  | ... |
5487  +-------+
5488  | Alloc | 1st[1st.size() - 1]
5489  +-------+
5490  | |
5491  | |
5492  | |
5493  +-------+
5494  | Alloc | 2nd[2nd.size() - 1]
5495  +-------+
5496  | ... |
5497  +-------+
5498  | Alloc | 2nd[1]
5499  +-------+
5500  | Alloc | 2nd[0]
5501 GetSize() +-------+
5502 
5503 */
5504 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5505 {
5506  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5507 public:
5508  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5509  virtual ~VmaBlockMetadata_Linear();
5510  virtual void Init(VkDeviceSize size);
5511 
5512  virtual bool Validate() const;
5513  virtual size_t GetAllocationCount() const;
5514  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5515  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5516  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5517 
5518  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5519  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5520 
5521 #if VMA_STATS_STRING_ENABLED
5522  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5523 #endif
5524 
5525  virtual bool CreateAllocationRequest(
5526  uint32_t currentFrameIndex,
5527  uint32_t frameInUseCount,
5528  VkDeviceSize bufferImageGranularity,
5529  VkDeviceSize allocSize,
5530  VkDeviceSize allocAlignment,
5531  bool upperAddress,
5532  VmaSuballocationType allocType,
5533  bool canMakeOtherLost,
5534  uint32_t strategy,
5535  VmaAllocationRequest* pAllocationRequest);
5536 
5537  virtual bool MakeRequestedAllocationsLost(
5538  uint32_t currentFrameIndex,
5539  uint32_t frameInUseCount,
5540  VmaAllocationRequest* pAllocationRequest);
5541 
5542  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5543 
5544  virtual VkResult CheckCorruption(const void* pBlockData);
5545 
5546  virtual void Alloc(
5547  const VmaAllocationRequest& request,
5548  VmaSuballocationType type,
5549  VkDeviceSize allocSize,
5550  VmaAllocation hAllocation);
5551 
5552  virtual void Free(const VmaAllocation allocation);
5553  virtual void FreeAtOffset(VkDeviceSize offset);
5554 
5555 private:
5556  /*
5557  There are two suballocation vectors, used in a ping-pong fashion.
5558  The one with index m_1stVectorIndex is called 1st.
5559  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5560  2nd can be non-empty only when 1st is not empty.
5561  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5562  */
5563  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5564 
5565  enum SECOND_VECTOR_MODE
5566  {
5567  SECOND_VECTOR_EMPTY,
5568  /*
5569  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
5570  all have smaller offsets.
5571  */
5572  SECOND_VECTOR_RING_BUFFER,
5573  /*
5574  Suballocations in 2nd vector are upper side of double stack.
5575  They all have offsets higher than those in 1st vector.
5576  Top of this stack means smaller offsets, but higher indices in this vector.
5577  */
5578  SECOND_VECTOR_DOUBLE_STACK,
5579  };
5580 
5581  VkDeviceSize m_SumFreeSize;
5582  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5583  uint32_t m_1stVectorIndex;
5584  SECOND_VECTOR_MODE m_2ndVectorMode;
5585 
5586  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5587  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5588  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5589  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5590 
5591  // Number of items in 1st vector with hAllocation = null at the beginning.
5592  size_t m_1stNullItemsBeginCount;
5593  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5594  size_t m_1stNullItemsMiddleCount;
5595  // Number of items in 2nd vector with hAllocation = null.
5596  size_t m_2ndNullItemsCount;
5597 
5598  bool ShouldCompact1st() const;
5599  void CleanupAfterFree();
5600 
5601  bool CreateAllocationRequest_LowerAddress(
5602  uint32_t currentFrameIndex,
5603  uint32_t frameInUseCount,
5604  VkDeviceSize bufferImageGranularity,
5605  VkDeviceSize allocSize,
5606  VkDeviceSize allocAlignment,
5607  VmaSuballocationType allocType,
5608  bool canMakeOtherLost,
5609  uint32_t strategy,
5610  VmaAllocationRequest* pAllocationRequest);
5611  bool CreateAllocationRequest_UpperAddress(
5612  uint32_t currentFrameIndex,
5613  uint32_t frameInUseCount,
5614  VkDeviceSize bufferImageGranularity,
5615  VkDeviceSize allocSize,
5616  VkDeviceSize allocAlignment,
5617  VmaSuballocationType allocType,
5618  bool canMakeOtherLost,
5619  uint32_t strategy,
5620  VmaAllocationRequest* pAllocationRequest);
5621 };
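// For illustration only: VmaBlockMetadata_Linear backs custom pools created
// with the VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag declared earlier in this
// header. A hedged usage sketch (error handling omitted; memoryTypeIndex is
// assumed to come from vmaFindMemoryTypeIndex()):
#if 0
static VkResult ExampleCreateLinearPool(VmaAllocator allocator, uint32_t memoryTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // One fixed 64 MiB block.
    poolCreateInfo.maxBlockCount = 1; // The linear algorithm uses a single block.
    // Allocations from this pool are then laid out as a stack, double stack, or
    // ring buffer, as pictured in the diagram above.
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}
#endif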
5622 
5623 /*
5624 - GetSize() is the original size of the allocated memory block.
5625 - m_UsableSize is this size aligned down to a power of two.
5626  All allocations and calculations happen relative to m_UsableSize.
5627 - GetUnusableSize() is the difference between them.
5628  It is reported as a separate, unused range, not available for allocations.
5629 
5630 Node at level 0 has size = m_UsableSize.
5631 Each subsequent level contains nodes half the size of those on the previous level.
5632 m_LevelCount is the maximum number of levels to use in the current object.
5633 */
5634 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5635 {
5636  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5637 public:
5638  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5639  virtual ~VmaBlockMetadata_Buddy();
5640  virtual void Init(VkDeviceSize size);
5641 
5642  virtual bool Validate() const;
5643  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5644  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5645  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5646  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5647 
5648  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5649  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5650 
5651 #if VMA_STATS_STRING_ENABLED
5652  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5653 #endif
5654 
5655  virtual bool CreateAllocationRequest(
5656  uint32_t currentFrameIndex,
5657  uint32_t frameInUseCount,
5658  VkDeviceSize bufferImageGranularity,
5659  VkDeviceSize allocSize,
5660  VkDeviceSize allocAlignment,
5661  bool upperAddress,
5662  VmaSuballocationType allocType,
5663  bool canMakeOtherLost,
5664  uint32_t strategy,
5665  VmaAllocationRequest* pAllocationRequest);
5666 
5667  virtual bool MakeRequestedAllocationsLost(
5668  uint32_t currentFrameIndex,
5669  uint32_t frameInUseCount,
5670  VmaAllocationRequest* pAllocationRequest);
5671 
5672  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5673 
5674  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5675 
5676  virtual void Alloc(
5677  const VmaAllocationRequest& request,
5678  VmaSuballocationType type,
5679  VkDeviceSize allocSize,
5680  VmaAllocation hAllocation);
5681 
5682  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5683  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5684 
5685 private:
5686  static const VkDeviceSize MIN_NODE_SIZE = 32;
5687  static const size_t MAX_LEVELS = 30;
5688 
5689  struct ValidationContext
5690  {
5691  size_t calculatedAllocationCount;
5692  size_t calculatedFreeCount;
5693  VkDeviceSize calculatedSumFreeSize;
5694 
5695  ValidationContext() :
5696  calculatedAllocationCount(0),
5697  calculatedFreeCount(0),
5698  calculatedSumFreeSize(0) { }
5699  };
5700 
5701  struct Node
5702  {
5703  VkDeviceSize offset;
5704  enum TYPE
5705  {
5706  TYPE_FREE,
5707  TYPE_ALLOCATION,
5708  TYPE_SPLIT,
5709  TYPE_COUNT
5710  } type;
5711  Node* parent;
5712  Node* buddy;
5713 
5714  union
5715  {
5716  struct
5717  {
5718  Node* prev;
5719  Node* next;
5720  } free;
5721  struct
5722  {
5723  VmaAllocation alloc;
5724  } allocation;
5725  struct
5726  {
5727  Node* leftChild;
5728  } split;
5729  };
5730  };
5731 
5732  // Size of the memory block aligned down to a power of two.
5733  VkDeviceSize m_UsableSize;
5734  uint32_t m_LevelCount;
5735 
5736  Node* m_Root;
5737  struct {
5738  Node* front;
5739  Node* back;
5740  } m_FreeList[MAX_LEVELS];
5741  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5742  size_t m_AllocationCount;
5743  // Number of nodes in the tree with type == TYPE_FREE.
5744  size_t m_FreeCount;
5745  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5746  VkDeviceSize m_SumFreeSize;
5747 
5748  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5749  void DeleteNode(Node* node);
5750  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5751  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5752  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5753  // The alloc parameter is passed just for validation; it can be null.
5754  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5755  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5756  // Adds node to the front of FreeList at given level.
5757  // node->type must be FREE.
5758  // node->free.prev, next can be undefined.
5759  void AddToFreeListFront(uint32_t level, Node* node);
5760  // Removes node from FreeList at given level.
5761  // node->type must be FREE.
5762  // node->free.prev, next stay untouched.
5763  void RemoveFromFreeList(uint32_t level, Node* node);
5764 
5765 #if VMA_STATS_STRING_ENABLED
5766  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5767 #endif
5768 };
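// For illustration only (hypothetical helper): the level arithmetic implied by
// the description above. A node at level L has size m_UsableSize >> L, so an
// allocation is placed at the deepest level whose node size still fits it.
// E.g. usableSize = 64 MiB, allocSize = 3 MiB: level node sizes are 64, 32,
// 16, 8, 4 MiB..., and the allocation lands at the 4 MiB level; the 1 MiB
// wasted to internal fragmentation is why m_SumFreeSize includes such space.
#if 0
static uint32_t ExampleAllocSizeToLevel(VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    VkDeviceSize currLevelNodeSize = usableSize;
    VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    while(allocSize <= nextLevelNodeSize && level + 1 < levelCount)
    {
        currLevelNodeSize = nextLevelNodeSize;
        nextLevelNodeSize = currLevelNodeSize >> 1;
        ++level;
    }
    return level;
}
#endif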
5769 
5770 /*
5771 Represents a single block of device memory (`VkDeviceMemory`) with all the
5772 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5773 
5774 Thread-safety: This class must be externally synchronized.
5775 */
5776 class VmaDeviceMemoryBlock
5777 {
5778  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5779 public:
5780  VmaBlockMetadata* m_pMetadata;
5781 
5782  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5783 
5784  ~VmaDeviceMemoryBlock()
5785  {
5786  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5787  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5788  }
5789 
5790  // Always call after construction.
5791  void Init(
5792  VmaAllocator hAllocator,
5793  VmaPool hParentPool,
5794  uint32_t newMemoryTypeIndex,
5795  VkDeviceMemory newMemory,
5796  VkDeviceSize newSize,
5797  uint32_t id,
5798  uint32_t algorithm);
5799  // Always call before destruction.
5800  void Destroy(VmaAllocator allocator);
5801 
5802  VmaPool GetParentPool() const { return m_hParentPool; }
5803  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5804  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5805  uint32_t GetId() const { return m_Id; }
5806  void* GetMappedData() const { return m_pMappedData; }
5807 
5808  // Validates all data structures inside this object. If not valid, returns false.
5809  bool Validate() const;
5810 
5811  VkResult CheckCorruption(VmaAllocator hAllocator);
5812 
5813  // ppData can be null.
5814  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5815  void Unmap(VmaAllocator hAllocator, uint32_t count);
5816 
5817  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5818  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5819 
5820  VkResult BindBufferMemory(
5821  const VmaAllocator hAllocator,
5822  const VmaAllocation hAllocation,
5823  VkBuffer hBuffer);
5824  VkResult BindImageMemory(
5825  const VmaAllocator hAllocator,
5826  const VmaAllocation hAllocation,
5827  VkImage hImage);
5828 
5829 private:
5830  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
5831  uint32_t m_MemoryTypeIndex;
5832  uint32_t m_Id;
5833  VkDeviceMemory m_hMemory;
5834 
5835  /*
5836  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5837  Also protects m_MapCount, m_pMappedData.
5838  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5839  */
5840  VMA_MUTEX m_Mutex;
5841  uint32_t m_MapCount;
5842  void* m_pMappedData;
5843 };
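// For illustration only: Map()/Unmap() above are reference counted per block.
// Only the 0 -> 1 transition maps the VkDeviceMemory and only 1 -> 0 unmaps
// it, so nested mappings of the same block are cheap. A hedged internal-usage
// sketch:
#if 0
static void ExampleMapBlock(VmaAllocator hAllocator, VmaDeviceMemoryBlock& block)
{
    void* pData = VMA_NULL;
    VkResult res = block.Map(hAllocator, 1, &pData); // Count 0 -> 1: maps memory.
    if(res == VK_SUCCESS)
    {
        // ... read or write through pData ...
        block.Unmap(hAllocator, 1); // Count 1 -> 0: unmaps memory.
    }
}
#endif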
5844 
5845 struct VmaPointerLess
5846 {
5847  bool operator()(const void* lhs, const void* rhs) const
5848  {
5849  return lhs < rhs;
5850  }
5851 };
5852 
5853 struct VmaDefragmentationMove
5854 {
5855  size_t srcBlockIndex;
5856  size_t dstBlockIndex;
5857  VkDeviceSize srcOffset;
5858  VkDeviceSize dstOffset;
5859  VkDeviceSize size;
5860 };
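// For illustration only: how a recorded move could be applied on the CPU,
// assuming both blocks are mapped and <cstring> is available. The library's
// actual implementation is VmaBlockVector::ApplyDefragmentationMovesCpu(),
// declared further below. memmove is used because the source and destination
// ranges may overlap when srcBlockIndex == dstBlockIndex.
#if 0
static void ExampleApplyMoveCpu(
    char* pSrcBlockMappedData,
    char* pDstBlockMappedData,
    const VmaDefragmentationMove& move)
{
    memmove(
        pDstBlockMappedData + move.dstOffset,
        pSrcBlockMappedData + move.srcOffset,
        (size_t)move.size);
}
#endif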
5861 
5862 class VmaDefragmentationAlgorithm;
5863 
5864 /*
5865 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5866 Vulkan memory type.
5867 
5868 Synchronized internally with a mutex.
5869 */
5870 struct VmaBlockVector
5871 {
5872  VMA_CLASS_NO_COPY(VmaBlockVector)
5873 public:
5874  VmaBlockVector(
5875  VmaAllocator hAllocator,
5876  VmaPool hParentPool,
5877  uint32_t memoryTypeIndex,
5878  VkDeviceSize preferredBlockSize,
5879  size_t minBlockCount,
5880  size_t maxBlockCount,
5881  VkDeviceSize bufferImageGranularity,
5882  uint32_t frameInUseCount,
5883  bool isCustomPool,
5884  bool explicitBlockSize,
5885  uint32_t algorithm);
5886  ~VmaBlockVector();
5887 
5888  VkResult CreateMinBlocks();
5889 
5890  VmaPool GetParentPool() const { return m_hParentPool; }
5891  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5892  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5893  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5894  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5895  uint32_t GetAlgorithm() const { return m_Algorithm; }
5896 
5897  void GetPoolStats(VmaPoolStats* pStats);
5898 
5899  bool IsEmpty() const { return m_Blocks.empty(); }
5900  bool IsCorruptionDetectionEnabled() const;
5901 
5902  VkResult Allocate(
5903  uint32_t currentFrameIndex,
5904  VkDeviceSize size,
5905  VkDeviceSize alignment,
5906  const VmaAllocationCreateInfo& createInfo,
5907  VmaSuballocationType suballocType,
5908  size_t allocationCount,
5909  VmaAllocation* pAllocations);
5910 
5911  void Free(
5912  VmaAllocation hAllocation);
5913 
5914  // Adds statistics of this BlockVector to pStats.
5915  void AddStats(VmaStats* pStats);
5916 
5917 #if VMA_STATS_STRING_ENABLED
5918  void PrintDetailedMap(class VmaJsonWriter& json);
5919 #endif
5920 
5921  void MakePoolAllocationsLost(
5922  uint32_t currentFrameIndex,
5923  size_t* pLostAllocationCount);
5924  VkResult CheckCorruption();
5925 
5926  // Saves results in pCtx->res.
5927  void Defragment(
5928  class VmaBlockVectorDefragmentationContext* pCtx,
5929  VmaDefragmentationStats* pStats,
5930  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5931  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5932  VkCommandBuffer commandBuffer);
5933  void DefragmentationEnd(
5934  class VmaBlockVectorDefragmentationContext* pCtx,
5935  VmaDefragmentationStats* pStats);
5936 
5938  // To be used only while the m_Mutex is locked. Used during defragmentation.
5939 
5940  size_t GetBlockCount() const { return m_Blocks.size(); }
5941  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5942  size_t CalcAllocationCount() const;
5943  bool IsBufferImageGranularityConflictPossible() const;
5944 
5945 private:
5946  friend class VmaDefragmentationAlgorithm_Generic;
5947 
5948  const VmaAllocator m_hAllocator;
5949  const VmaPool m_hParentPool;
5950  const uint32_t m_MemoryTypeIndex;
5951  const VkDeviceSize m_PreferredBlockSize;
5952  const size_t m_MinBlockCount;
5953  const size_t m_MaxBlockCount;
5954  const VkDeviceSize m_BufferImageGranularity;
5955  const uint32_t m_FrameInUseCount;
5956  const bool m_IsCustomPool;
5957  const bool m_ExplicitBlockSize;
5958  const uint32_t m_Algorithm;
5959  /* There can be at most one block that is completely empty - a hysteresis to
5960  avoid the pessimistic case of alternating creation and destruction of a
5961  VkDeviceMemory. */
5962  bool m_HasEmptyBlock;
5963  VMA_RW_MUTEX m_Mutex;
5964  // Incrementally sorted by sumFreeSize, ascending.
5965  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5966  uint32_t m_NextBlockId;
5967 
5968  VkDeviceSize CalcMaxBlockSize() const;
5969 
5970  // Finds and removes given block from vector.
5971  void Remove(VmaDeviceMemoryBlock* pBlock);
5972 
5973  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5974  // after this call.
5975  void IncrementallySortBlocks();
5976 
5977  VkResult AllocatePage(
5978  uint32_t currentFrameIndex,
5979  VkDeviceSize size,
5980  VkDeviceSize alignment,
5981  const VmaAllocationCreateInfo& createInfo,
5982  VmaSuballocationType suballocType,
5983  VmaAllocation* pAllocation);
5984 
5985  // To be used only without CAN_MAKE_OTHER_LOST flag.
5986  VkResult AllocateFromBlock(
5987  VmaDeviceMemoryBlock* pBlock,
5988  uint32_t currentFrameIndex,
5989  VkDeviceSize size,
5990  VkDeviceSize alignment,
5991  VmaAllocationCreateFlags allocFlags,
5992  void* pUserData,
5993  VmaSuballocationType suballocType,
5994  uint32_t strategy,
5995  VmaAllocation* pAllocation);
5996 
5997  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5998 
5999  // Saves result to pCtx->res.
6000  void ApplyDefragmentationMovesCpu(
6001  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6002  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6003  // Saves result to pCtx->res.
6004  void ApplyDefragmentationMovesGpu(
6005  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6006  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6007  VkCommandBuffer commandBuffer);
6008 
6009  /*
6010  Used during defragmentation. pDefragmentationStats is optional; it is an in/out
6011  parameter, updated with new data.
6012  */
6013  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6014 };
6015 
6016 struct VmaPool_T
6017 {
6018  VMA_CLASS_NO_COPY(VmaPool_T)
6019 public:
6020  VmaBlockVector m_BlockVector;
6021 
6022  VmaPool_T(
6023  VmaAllocator hAllocator,
6024  const VmaPoolCreateInfo& createInfo,
6025  VkDeviceSize preferredBlockSize);
6026  ~VmaPool_T();
6027 
6028  uint32_t GetId() const { return m_Id; }
6029  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6030 
6031 #if VMA_STATS_STRING_ENABLED
6032  //void PrintDetailedMap(class VmaStringBuilder& sb);
6033 #endif
6034 
6035 private:
6036  uint32_t m_Id;
6037 };
6038 
6039 /*
6040 Performs defragmentation:
6041 
6042 - Updates `pBlockVector->m_pMetadata`.
6043 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6044 - Does not move actual data, only returns requested moves as `moves`.
6045 */
6046 class VmaDefragmentationAlgorithm
6047 {
6048  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6049 public:
6050  VmaDefragmentationAlgorithm(
6051  VmaAllocator hAllocator,
6052  VmaBlockVector* pBlockVector,
6053  uint32_t currentFrameIndex) :
6054  m_hAllocator(hAllocator),
6055  m_pBlockVector(pBlockVector),
6056  m_CurrentFrameIndex(currentFrameIndex)
6057  {
6058  }
6059  virtual ~VmaDefragmentationAlgorithm()
6060  {
6061  }
6062 
6063  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6064  virtual void AddAll() = 0;
6065 
6066  virtual VkResult Defragment(
6067  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6068  VkDeviceSize maxBytesToMove,
6069  uint32_t maxAllocationsToMove) = 0;
6070 
6071  virtual VkDeviceSize GetBytesMoved() const = 0;
6072  virtual uint32_t GetAllocationsMoved() const = 0;
6073 
6074 protected:
6075  VmaAllocator const m_hAllocator;
6076  VmaBlockVector* const m_pBlockVector;
6077  const uint32_t m_CurrentFrameIndex;
6078 
6079  struct AllocationInfo
6080  {
6081  VmaAllocation m_hAllocation;
6082  VkBool32* m_pChanged;
6083 
6084  AllocationInfo() :
6085  m_hAllocation(VK_NULL_HANDLE),
6086  m_pChanged(VMA_NULL)
6087  {
6088  }
6089  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6090  m_hAllocation(hAlloc),
6091  m_pChanged(pChanged)
6092  {
6093  }
6094  };
6095 };
6096 
6097 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6098 {
6099  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6100 public:
6101  VmaDefragmentationAlgorithm_Generic(
6102  VmaAllocator hAllocator,
6103  VmaBlockVector* pBlockVector,
6104  uint32_t currentFrameIndex,
6105  bool overlappingMoveSupported);
6106  virtual ~VmaDefragmentationAlgorithm_Generic();
6107 
6108  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6109  virtual void AddAll() { m_AllAllocations = true; }
6110 
6111  virtual VkResult Defragment(
6112  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6113  VkDeviceSize maxBytesToMove,
6114  uint32_t maxAllocationsToMove);
6115 
6116  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6117  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6118 
6119 private:
6120  uint32_t m_AllocationCount;
6121  bool m_AllAllocations;
6122 
6123  VkDeviceSize m_BytesMoved;
6124  uint32_t m_AllocationsMoved;
6125 
6126  struct AllocationInfoSizeGreater
6127  {
6128  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6129  {
6130  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6131  }
6132  };
6133 
6134  struct AllocationInfoOffsetGreater
6135  {
6136  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6137  {
6138  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6139  }
6140  };
6141 
6142  struct BlockInfo
6143  {
6144  size_t m_OriginalBlockIndex;
6145  VmaDeviceMemoryBlock* m_pBlock;
6146  bool m_HasNonMovableAllocations;
6147  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6148 
6149  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6150  m_OriginalBlockIndex(SIZE_MAX),
6151  m_pBlock(VMA_NULL),
6152  m_HasNonMovableAllocations(true),
6153  m_Allocations(pAllocationCallbacks)
6154  {
6155  }
6156 
6157  void CalcHasNonMovableAllocations()
6158  {
6159  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6160  const size_t defragmentAllocCount = m_Allocations.size();
6161  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6162  }
6163 
6164  void SortAllocationsBySizeDescending()
6165  {
6166  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6167  }
6168 
6169  void SortAllocationsByOffsetDescending()
6170  {
6171  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6172  }
6173  };
6174 
6175  struct BlockPointerLess
6176  {
6177  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6178  {
6179  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6180  }
6181  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6182  {
6183  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6184  }
6185  };
6186 
6187  // 1. Blocks with some non-movable allocations go first.
6188  // 2. Blocks with smaller sumFreeSize go first.
6189  struct BlockInfoCompareMoveDestination
6190  {
6191  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6192  {
6193  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6194  {
6195  return true;
6196  }
6197  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6198  {
6199  return false;
6200  }
6201  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6202  {
6203  return true;
6204  }
6205  return false;
6206  }
6207  };
6208 
6209  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6210  BlockInfoVector m_Blocks;
6211 
6212  VkResult DefragmentRound(
6213  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6214  VkDeviceSize maxBytesToMove,
6215  uint32_t maxAllocationsToMove);
6216 
6217  size_t CalcBlocksWithNonMovableCount() const;
6218 
6219  static bool MoveMakesSense(
6220  size_t dstBlockIndex, VkDeviceSize dstOffset,
6221  size_t srcBlockIndex, VkDeviceSize srcOffset);
6222 };
6223 
6224 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6225 {
6226  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6227 public:
6228  VmaDefragmentationAlgorithm_Fast(
6229  VmaAllocator hAllocator,
6230  VmaBlockVector* pBlockVector,
6231  uint32_t currentFrameIndex,
6232  bool overlappingMoveSupported);
6233  virtual ~VmaDefragmentationAlgorithm_Fast();
6234 
6235  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6236  virtual void AddAll() { m_AllAllocations = true; }
6237 
6238  virtual VkResult Defragment(
6239  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6240  VkDeviceSize maxBytesToMove,
6241  uint32_t maxAllocationsToMove);
6242 
6243  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6244  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6245 
6246 private:
6247  struct BlockInfo
6248  {
6249  size_t origBlockIndex;
6250  };
6251 
6252  class FreeSpaceDatabase
6253  {
6254  public:
6255  FreeSpaceDatabase()
6256  {
6257  FreeSpace s = {};
6258  s.blockInfoIndex = SIZE_MAX;
6259  for(size_t i = 0; i < MAX_COUNT; ++i)
6260  {
6261  m_FreeSpaces[i] = s;
6262  }
6263  }
6264 
6265  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6266  {
6267  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6268  {
6269  return;
6270  }
6271 
6272  // Find the first empty slot, or else the smallest slot that is smaller than this free space.
6273  size_t bestIndex = SIZE_MAX;
6274  for(size_t i = 0; i < MAX_COUNT; ++i)
6275  {
6276  // Empty structure.
6277  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6278  {
6279  bestIndex = i;
6280  break;
6281  }
6282  if(m_FreeSpaces[i].size < size &&
6283  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6284  {
6285  bestIndex = i;
6286  }
6287  }
6288 
6289  if(bestIndex != SIZE_MAX)
6290  {
6291  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6292  m_FreeSpaces[bestIndex].offset = offset;
6293  m_FreeSpaces[bestIndex].size = size;
6294  }
6295  }
6296 
6297  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6298  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6299  {
6300  size_t bestIndex = SIZE_MAX;
6301  VkDeviceSize bestFreeSpaceAfter = 0;
6302  for(size_t i = 0; i < MAX_COUNT; ++i)
6303  {
6304  // Structure is valid.
6305  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6306  {
6307  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6308  // Allocation fits into this structure.
6309  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6310  {
6311  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6312  (dstOffset + size);
6313  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6314  {
6315  bestIndex = i;
6316  bestFreeSpaceAfter = freeSpaceAfter;
6317  }
6318  }
6319  }
6320  }
6321 
6322  if(bestIndex != SIZE_MAX)
6323  {
6324  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6325  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6326 
6327  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6328  {
6329  // Leave this structure for remaining empty space.
6330  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6331  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6332  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6333  }
6334  else
6335  {
6336  // This structure becomes invalid.
6337  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6338  }
6339 
6340  return true;
6341  }
6342 
6343  return false;
6344  }
6345 
6346  private:
6347  static const size_t MAX_COUNT = 4;
6348 
6349  struct FreeSpace
6350  {
6351  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6352  VkDeviceSize offset;
6353  VkDeviceSize size;
6354  } m_FreeSpaces[MAX_COUNT];
6355  };
6356 
6357  const bool m_OverlappingMoveSupported;
6358 
6359  uint32_t m_AllocationCount;
6360  bool m_AllAllocations;
6361 
6362  VkDeviceSize m_BytesMoved;
6363  uint32_t m_AllocationsMoved;
6364 
6365  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6366 
6367  void PreprocessMetadata();
6368  void PostprocessMetadata();
6369  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6370 };
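// A worked example of FreeSpaceDatabase::Fetch() above, with values chosen
// only for illustration: given a registered free space at offset = 100,
// size = 200 and a request with alignment = 64, size = 120, the aligned
// destination is VmaAlignUp(100, 64) = 128. The request fits because
// 128 + 120 = 248 <= 100 + 200 = 300, leaving 300 - 248 = 52 bytes after it.
// If 52 >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, the slot is shrunk to
// offset = 248, size = 52; otherwise it is invalidated.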
6371 
6372 struct VmaBlockDefragmentationContext
6373 {
6374  enum BLOCK_FLAG
6375  {
6376  BLOCK_FLAG_USED = 0x00000001,
6377  };
6378  uint32_t flags;
6379  VkBuffer hBuffer;
6380 
6381  VmaBlockDefragmentationContext() :
6382  flags(0),
6383  hBuffer(VK_NULL_HANDLE)
6384  {
6385  }
6386 };
6387 
6388 class VmaBlockVectorDefragmentationContext
6389 {
6390  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6391 public:
6392  VkResult res;
6393  bool mutexLocked;
6394  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6395 
6396  VmaBlockVectorDefragmentationContext(
6397  VmaAllocator hAllocator,
6398  VmaPool hCustomPool, // Optional.
6399  VmaBlockVector* pBlockVector,
6400  uint32_t currFrameIndex,
6401  uint32_t flags);
6402  ~VmaBlockVectorDefragmentationContext();
6403 
6404  VmaPool GetCustomPool() const { return m_hCustomPool; }
6405  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6406  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6407 
6408  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6409  void AddAll() { m_AllAllocations = true; }
6410 
6411  void Begin(bool overlappingMoveSupported);
6412 
6413 private:
6414  const VmaAllocator m_hAllocator;
6415  // Null if not from custom pool.
6416  const VmaPool m_hCustomPool;
6417  // Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6418  VmaBlockVector* const m_pBlockVector;
6419  const uint32_t m_CurrFrameIndex;
6420  const uint32_t m_AlgorithmFlags;
6421  // Owner of this object.
6422  VmaDefragmentationAlgorithm* m_pAlgorithm;
6423 
6424  struct AllocInfo
6425  {
6426  VmaAllocation hAlloc;
6427  VkBool32* pChanged;
6428  };
6429  // Used between constructor and Begin.
6430  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6431  bool m_AllAllocations;
6432 };
6433 
6434 struct VmaDefragmentationContext_T
6435 {
6436 private:
6437  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6438 public:
6439  VmaDefragmentationContext_T(
6440  VmaAllocator hAllocator,
6441  uint32_t currFrameIndex,
6442  uint32_t flags,
6443  VmaDefragmentationStats* pStats);
6444  ~VmaDefragmentationContext_T();
6445 
6446  void AddPools(uint32_t poolCount, VmaPool* pPools);
6447  void AddAllocations(
6448  uint32_t allocationCount,
6449  VmaAllocation* pAllocations,
6450  VkBool32* pAllocationsChanged);
6451 
6452  /*
6453  Returns:
6454  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6455  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6456  - Negative value if an error occurred and the object can be destroyed immediately.
6457  */
6458  VkResult Defragment(
6459  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6460  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6461  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
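  // Illustrative usage sketch (editorial addition, not part of the source):
  // assuming `ctx` was obtained from DefragmentationBegin() and `info` is the
  // VmaDefragmentationInfo2 passed to it:
  //
  //   VkResult res = ctx->Defragment(
  //       info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
  //       info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
  //       info.commandBuffer, pStats);
  //   // VK_NOT_READY: keep ctx alive until vmaDefragmentationEnd().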
6462 
6463 private:
6464  const VmaAllocator m_hAllocator;
6465  const uint32_t m_CurrFrameIndex;
6466  const uint32_t m_Flags;
6467  VmaDefragmentationStats* const m_pStats;
6468  // Owner of these objects.
6469  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6470  // Owner of these objects.
6471  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6472 };
6473 
6474 #if VMA_RECORDING_ENABLED
6475 
6476 class VmaRecorder
6477 {
6478 public:
6479  VmaRecorder();
6480  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6481  void WriteConfiguration(
6482  const VkPhysicalDeviceProperties& devProps,
6483  const VkPhysicalDeviceMemoryProperties& memProps,
6484  bool dedicatedAllocationExtensionEnabled);
6485  ~VmaRecorder();
6486 
6487  void RecordCreateAllocator(uint32_t frameIndex);
6488  void RecordDestroyAllocator(uint32_t frameIndex);
6489  void RecordCreatePool(uint32_t frameIndex,
6490  const VmaPoolCreateInfo& createInfo,
6491  VmaPool pool);
6492  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6493  void RecordAllocateMemory(uint32_t frameIndex,
6494  const VkMemoryRequirements& vkMemReq,
6495  const VmaAllocationCreateInfo& createInfo,
6496  VmaAllocation allocation);
6497  void RecordAllocateMemoryPages(uint32_t frameIndex,
6498  const VkMemoryRequirements& vkMemReq,
6499  const VmaAllocationCreateInfo& createInfo,
6500  uint64_t allocationCount,
6501  const VmaAllocation* pAllocations);
6502  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6503  const VkMemoryRequirements& vkMemReq,
6504  bool requiresDedicatedAllocation,
6505  bool prefersDedicatedAllocation,
6506  const VmaAllocationCreateInfo& createInfo,
6507  VmaAllocation allocation);
6508  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6509  const VkMemoryRequirements& vkMemReq,
6510  bool requiresDedicatedAllocation,
6511  bool prefersDedicatedAllocation,
6512  const VmaAllocationCreateInfo& createInfo,
6513  VmaAllocation allocation);
6514  void RecordFreeMemory(uint32_t frameIndex,
6515  VmaAllocation allocation);
6516  void RecordFreeMemoryPages(uint32_t frameIndex,
6517  uint64_t allocationCount,
6518  const VmaAllocation* pAllocations);
6519  void RecordResizeAllocation(
6520  uint32_t frameIndex,
6521  VmaAllocation allocation,
6522  VkDeviceSize newSize);
6523  void RecordSetAllocationUserData(uint32_t frameIndex,
6524  VmaAllocation allocation,
6525  const void* pUserData);
6526  void RecordCreateLostAllocation(uint32_t frameIndex,
6527  VmaAllocation allocation);
6528  void RecordMapMemory(uint32_t frameIndex,
6529  VmaAllocation allocation);
6530  void RecordUnmapMemory(uint32_t frameIndex,
6531  VmaAllocation allocation);
6532  void RecordFlushAllocation(uint32_t frameIndex,
6533  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6534  void RecordInvalidateAllocation(uint32_t frameIndex,
6535  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6536  void RecordCreateBuffer(uint32_t frameIndex,
6537  const VkBufferCreateInfo& bufCreateInfo,
6538  const VmaAllocationCreateInfo& allocCreateInfo,
6539  VmaAllocation allocation);
6540  void RecordCreateImage(uint32_t frameIndex,
6541  const VkImageCreateInfo& imageCreateInfo,
6542  const VmaAllocationCreateInfo& allocCreateInfo,
6543  VmaAllocation allocation);
6544  void RecordDestroyBuffer(uint32_t frameIndex,
6545  VmaAllocation allocation);
6546  void RecordDestroyImage(uint32_t frameIndex,
6547  VmaAllocation allocation);
6548  void RecordTouchAllocation(uint32_t frameIndex,
6549  VmaAllocation allocation);
6550  void RecordGetAllocationInfo(uint32_t frameIndex,
6551  VmaAllocation allocation);
6552  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6553  VmaPool pool);
6554  void RecordDefragmentationBegin(uint32_t frameIndex,
6555  const VmaDefragmentationInfo2& info,
6556  VmaDefragmentationContext ctx);
6557  void RecordDefragmentationEnd(uint32_t frameIndex,
6558  VmaDefragmentationContext ctx);
6559 
6560 private:
6561  struct CallParams
6562  {
6563  uint32_t threadId;
6564  double time;
6565  };
6566 
6567  class UserDataString
6568  {
6569  public:
6570  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6571  const char* GetString() const { return m_Str; }
6572 
6573  private:
6574  char m_PtrStr[17];
6575  const char* m_Str;
6576  };
6577 
6578  bool m_UseMutex;
6579  VmaRecordFlags m_Flags;
6580  FILE* m_File;
6581  VMA_MUTEX m_FileMutex;
6582  int64_t m_Freq;
6583  int64_t m_StartCounter;
6584 
6585  void GetBasicParams(CallParams& outParams);
6586 
6587  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6588  template<typename T>
6589  void PrintPointerList(uint64_t count, const T* pItems)
6590  {
6591  if(count)
6592  {
6593  fprintf(m_File, "%p", pItems[0]);
6594  for(uint64_t i = 1; i < count; ++i)
6595  {
6596  fprintf(m_File, " %p", pItems[i]);
6597  }
6598  }
6599  }
6600 
6601  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6602  void Flush();
6603 };
6604 
6605 #endif // #if VMA_RECORDING_ENABLED
6606 
6607 /*
6608 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6609 */
6610 class VmaAllocationObjectAllocator
6611 {
6612  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6613 public:
6614  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6615 
6616  VmaAllocation Allocate();
6617  void Free(VmaAllocation hAlloc);
6618 
6619 private:
6620  VMA_MUTEX m_Mutex;
6621  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6622 };
6623 
6624 // Main allocator object.
6625 struct VmaAllocator_T
6626 {
6627  VMA_CLASS_NO_COPY(VmaAllocator_T)
6628 public:
6629  bool m_UseMutex;
6630  bool m_UseKhrDedicatedAllocation;
6631  VkDevice m_hDevice;
6632  bool m_AllocationCallbacksSpecified;
6633  VkAllocationCallbacks m_AllocationCallbacks;
6634  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6635  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6636 
6637  // Number of bytes still available under the heap size limit, or VK_WHOLE_SIZE if no limit is set for that heap.
6638  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6639  VMA_MUTEX m_HeapSizeLimitMutex;
6640 
6641  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6642  VkPhysicalDeviceMemoryProperties m_MemProps;
6643 
6644  // Default pools.
6645  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6646 
6647  // Each vector is sorted by memory (handle value).
6648  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6649  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6650  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6651 
6652  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6653  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6654  ~VmaAllocator_T();
6655 
6656  const VkAllocationCallbacks* GetAllocationCallbacks() const
6657  {
6658  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6659  }
6660  const VmaVulkanFunctions& GetVulkanFunctions() const
6661  {
6662  return m_VulkanFunctions;
6663  }
6664 
6665  VkDeviceSize GetBufferImageGranularity() const
6666  {
6667  return VMA_MAX(
6668  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6669  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6670  }
6671 
6672  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6673  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6674 
6675  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6676  {
6677  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6678  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6679  }
6680  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6681  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6682  {
6683  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6684  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6685  }
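  // Example (editorial note): HOST_VISIBLE | HOST_CACHED masks to
  // HOST_VISIBLE alone, so the type is non-coherent (true), while
  // HOST_VISIBLE | HOST_COHERENT keeps both masked bits, so it is coherent (false).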
6686  // Minimum alignment for all allocations in specific memory type.
6687  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6688  {
6689  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6690  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6691  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6692  }
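  // Rationale (editorial note): ranges passed to vkFlushMappedMemoryRanges /
  // vkInvalidateMappedMemoryRanges must be aligned to nonCoherentAtomSize, so
  // allocations in non-coherent memory types are aligned up to whole atoms to
  // keep such rounded ranges from overlapping neighboring allocations.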
6693 
6694  bool IsIntegratedGpu() const
6695  {
6696  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6697  }
6698 
6699 #if VMA_RECORDING_ENABLED
6700  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6701 #endif
6702 
6703  void GetBufferMemoryRequirements(
6704  VkBuffer hBuffer,
6705  VkMemoryRequirements& memReq,
6706  bool& requiresDedicatedAllocation,
6707  bool& prefersDedicatedAllocation) const;
6708  void GetImageMemoryRequirements(
6709  VkImage hImage,
6710  VkMemoryRequirements& memReq,
6711  bool& requiresDedicatedAllocation,
6712  bool& prefersDedicatedAllocation) const;
6713 
6714  // Main allocation function.
6715  VkResult AllocateMemory(
6716  const VkMemoryRequirements& vkMemReq,
6717  bool requiresDedicatedAllocation,
6718  bool prefersDedicatedAllocation,
6719  VkBuffer dedicatedBuffer,
6720  VkImage dedicatedImage,
6721  const VmaAllocationCreateInfo& createInfo,
6722  VmaSuballocationType suballocType,
6723  size_t allocationCount,
6724  VmaAllocation* pAllocations);
6725 
6726  // Main deallocation function.
6727  void FreeMemory(
6728  size_t allocationCount,
6729  const VmaAllocation* pAllocations);
6730 
6731  VkResult ResizeAllocation(
6732  const VmaAllocation alloc,
6733  VkDeviceSize newSize);
6734 
6735  void CalculateStats(VmaStats* pStats);
6736 
6737 #if VMA_STATS_STRING_ENABLED
6738  void PrintDetailedMap(class VmaJsonWriter& json);
6739 #endif
6740 
6741  VkResult DefragmentationBegin(
6742  const VmaDefragmentationInfo2& info,
6743  VmaDefragmentationStats* pStats,
6744  VmaDefragmentationContext* pContext);
6745  VkResult DefragmentationEnd(
6746  VmaDefragmentationContext context);
6747 
6748  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6749  bool TouchAllocation(VmaAllocation hAllocation);
6750 
6751  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6752  void DestroyPool(VmaPool pool);
6753  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6754 
6755  void SetCurrentFrameIndex(uint32_t frameIndex);
6756  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6757 
6758  void MakePoolAllocationsLost(
6759  VmaPool hPool,
6760  size_t* pLostAllocationCount);
6761  VkResult CheckPoolCorruption(VmaPool hPool);
6762  VkResult CheckCorruption(uint32_t memoryTypeBits);
6763 
6764  void CreateLostAllocation(VmaAllocation* pAllocation);
6765 
6766  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6767  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6768 
6769  VkResult Map(VmaAllocation hAllocation, void** ppData);
6770  void Unmap(VmaAllocation hAllocation);
6771 
6772  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6773  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6774 
6775  void FlushOrInvalidateAllocation(
6776  VmaAllocation hAllocation,
6777  VkDeviceSize offset, VkDeviceSize size,
6778  VMA_CACHE_OPERATION op);
6779 
6780  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6781 
6782  /*
6783  Returns bit mask of memory types that can support defragmentation on GPU as
6784  they support creation of required buffer for copy operations.
6785  */
6786  uint32_t GetGpuDefragmentationMemoryTypeBits();
6787 
6788 private:
6789  VkDeviceSize m_PreferredLargeHeapBlockSize;
6790 
6791  VkPhysicalDevice m_PhysicalDevice;
6792  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6793  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
6794 
6795  VMA_RW_MUTEX m_PoolsMutex;
6796  // Protected by m_PoolsMutex. Sorted by pointer value.
6797  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6798  uint32_t m_NextPoolId;
6799 
6800  VmaVulkanFunctions m_VulkanFunctions;
6801 
6802 #if VMA_RECORDING_ENABLED
6803  VmaRecorder* m_pRecorder;
6804 #endif
6805 
6806  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6807 
6808  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6809 
6810  VkResult AllocateMemoryOfType(
6811  VkDeviceSize size,
6812  VkDeviceSize alignment,
6813  bool dedicatedAllocation,
6814  VkBuffer dedicatedBuffer,
6815  VkImage dedicatedImage,
6816  const VmaAllocationCreateInfo& createInfo,
6817  uint32_t memTypeIndex,
6818  VmaSuballocationType suballocType,
6819  size_t allocationCount,
6820  VmaAllocation* pAllocations);
6821 
6822  // Helper function only to be used inside AllocateDedicatedMemory.
6823  VkResult AllocateDedicatedMemoryPage(
6824  VkDeviceSize size,
6825  VmaSuballocationType suballocType,
6826  uint32_t memTypeIndex,
6827  const VkMemoryAllocateInfo& allocInfo,
6828  bool map,
6829  bool isUserDataString,
6830  void* pUserData,
6831  VmaAllocation* pAllocation);
6832 
6833  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6834  VkResult AllocateDedicatedMemory(
6835  VkDeviceSize size,
6836  VmaSuballocationType suballocType,
6837  uint32_t memTypeIndex,
6838  bool map,
6839  bool isUserDataString,
6840  void* pUserData,
6841  VkBuffer dedicatedBuffer,
6842  VkImage dedicatedImage,
6843  size_t allocationCount,
6844  VmaAllocation* pAllocations);
6845 
6846  // Frees the given allocation made as dedicated memory: unregisters it and releases its VkDeviceMemory.
6847  void FreeDedicatedMemory(VmaAllocation allocation);
6848 
6849  /*
6850  Calculates and returns bit mask of memory types that can support defragmentation
6851  on GPU as they support creation of required buffer for copy operations.
6852  */
6853  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
6854 };
6855 
6856 ////////////////////////////////////////////////////////////////////////////////
6857 // Memory allocation #2 after VmaAllocator_T definition
6858 
6859 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6860 {
6861  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6862 }
6863 
6864 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6865 {
6866  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6867 }
6868 
6869 template<typename T>
6870 static T* VmaAllocate(VmaAllocator hAllocator)
6871 {
6872  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6873 }
6874 
6875 template<typename T>
6876 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6877 {
6878  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6879 }
6880 
6881 template<typename T>
6882 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6883 {
6884  if(ptr != VMA_NULL)
6885  {
6886  ptr->~T();
6887  VmaFree(hAllocator, ptr);
6888  }
6889 }
6890 
6891 template<typename T>
6892 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6893 {
6894  if(ptr != VMA_NULL)
6895  {
6896  for(size_t i = count; i--; )
6897  ptr[i].~T();
6898  VmaFree(hAllocator, ptr);
6899  }
6900 }
6901 
6902 ////////////////////////////////////////////////////////////////////////////////
6903 // VmaStringBuilder
6904 
6905 #if VMA_STATS_STRING_ENABLED
6906 
6907 class VmaStringBuilder
6908 {
6909 public:
6910  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6911  size_t GetLength() const { return m_Data.size(); }
6912  const char* GetData() const { return m_Data.data(); }
6913 
6914  void Add(char ch) { m_Data.push_back(ch); }
6915  void Add(const char* pStr);
6916  void AddNewLine() { Add('\n'); }
6917  void AddNumber(uint32_t num);
6918  void AddNumber(uint64_t num);
6919  void AddPointer(const void* ptr);
6920 
6921 private:
6922  VmaVector< char, VmaStlAllocator<char> > m_Data;
6923 };
6924 
6925 void VmaStringBuilder::Add(const char* pStr)
6926 {
6927  const size_t strLen = strlen(pStr);
6928  if(strLen > 0)
6929  {
6930  const size_t oldCount = m_Data.size();
6931  m_Data.resize(oldCount + strLen);
6932  memcpy(m_Data.data() + oldCount, pStr, strLen);
6933  }
6934 }
6935 
6936 void VmaStringBuilder::AddNumber(uint32_t num)
6937 {
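  // UINT32_MAX has at most 10 decimal digits, +1 for the terminating null.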
6938  char buf[11];
6939  VmaUint32ToStr(buf, sizeof(buf), num);
6940  Add(buf);
6941 }
6942 
6943 void VmaStringBuilder::AddNumber(uint64_t num)
6944 {
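  // UINT64_MAX has at most 20 decimal digits, +1 for the terminating null.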
6945  char buf[21];
6946  VmaUint64ToStr(buf, sizeof(buf), num);
6947  Add(buf);
6948 }
6949 
6950 void VmaStringBuilder::AddPointer(const void* ptr)
6951 {
6952  char buf[21];
6953  VmaPtrToStr(buf, sizeof(buf), ptr);
6954  Add(buf);
6955 }
6956 
6957 #endif // #if VMA_STATS_STRING_ENABLED
6958 
6959 ////////////////////////////////////////////////////////////////////////////////
6960 // VmaJsonWriter
6961 
6962 #if VMA_STATS_STRING_ENABLED
6963 
6964 class VmaJsonWriter
6965 {
6966  VMA_CLASS_NO_COPY(VmaJsonWriter)
6967 public:
6968  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6969  ~VmaJsonWriter();
6970 
6971  void BeginObject(bool singleLine = false);
6972  void EndObject();
6973 
6974  void BeginArray(bool singleLine = false);
6975  void EndArray();
6976 
6977  void WriteString(const char* pStr);
6978  void BeginString(const char* pStr = VMA_NULL);
6979  void ContinueString(const char* pStr);
6980  void ContinueString(uint32_t n);
6981  void ContinueString(uint64_t n);
6982  void ContinueString_Pointer(const void* ptr);
6983  void EndString(const char* pStr = VMA_NULL);
6984 
6985  void WriteNumber(uint32_t n);
6986  void WriteNumber(uint64_t n);
6987  void WriteBool(bool b);
6988  void WriteNull();
6989 
6990 private:
6991  static const char* const INDENT;
6992 
6993  enum COLLECTION_TYPE
6994  {
6995  COLLECTION_TYPE_OBJECT,
6996  COLLECTION_TYPE_ARRAY,
6997  };
6998  struct StackItem
6999  {
7000  COLLECTION_TYPE type;
7001  uint32_t valueCount;
7002  bool singleLineMode;
7003  };
7004 
7005  VmaStringBuilder& m_SB;
7006  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7007  bool m_InsideString;
7008 
7009  void BeginValue(bool isString);
7010  void WriteIndent(bool oneLess = false);
7011 };
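 
// Minimal usage sketch (editorial illustration, assuming an existing
// VmaAllocator `allocator`). The destructor asserts that every Begin* call
// was matched by End*:
//
//   VmaStringBuilder sb(allocator);
//   {
//       VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Answer"); // key - must be a string
//       json.WriteNumber(42u);      // value
//       json.EndObject();
//   }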
7012 
7013 const char* const VmaJsonWriter::INDENT = " ";
7014 
7015 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7016  m_SB(sb),
7017  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7018  m_InsideString(false)
7019 {
7020 }
7021 
7022 VmaJsonWriter::~VmaJsonWriter()
7023 {
7024  VMA_ASSERT(!m_InsideString);
7025  VMA_ASSERT(m_Stack.empty());
7026 }
7027 
7028 void VmaJsonWriter::BeginObject(bool singleLine)
7029 {
7030  VMA_ASSERT(!m_InsideString);
7031 
7032  BeginValue(false);
7033  m_SB.Add('{');
7034 
7035  StackItem item;
7036  item.type = COLLECTION_TYPE_OBJECT;
7037  item.valueCount = 0;
7038  item.singleLineMode = singleLine;
7039  m_Stack.push_back(item);
7040 }
7041 
7042 void VmaJsonWriter::EndObject()
7043 {
7044  VMA_ASSERT(!m_InsideString);
7045 
7046  WriteIndent(true);
7047  m_SB.Add('}');
7048 
7049  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7050  m_Stack.pop_back();
7051 }
7052 
7053 void VmaJsonWriter::BeginArray(bool singleLine)
7054 {
7055  VMA_ASSERT(!m_InsideString);
7056 
7057  BeginValue(false);
7058  m_SB.Add('[');
7059 
7060  StackItem item;
7061  item.type = COLLECTION_TYPE_ARRAY;
7062  item.valueCount = 0;
7063  item.singleLineMode = singleLine;
7064  m_Stack.push_back(item);
7065 }
7066 
7067 void VmaJsonWriter::EndArray()
7068 {
7069  VMA_ASSERT(!m_InsideString);
7070 
7071  WriteIndent(true);
7072  m_SB.Add(']');
7073 
7074  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7075  m_Stack.pop_back();
7076 }
7077 
7078 void VmaJsonWriter::WriteString(const char* pStr)
7079 {
7080  BeginString(pStr);
7081  EndString();
7082 }
7083 
7084 void VmaJsonWriter::BeginString(const char* pStr)
7085 {
7086  VMA_ASSERT(!m_InsideString);
7087 
7088  BeginValue(true);
7089  m_SB.Add('"');
7090  m_InsideString = true;
7091  if(pStr != VMA_NULL && pStr[0] != '\0')
7092  {
7093  ContinueString(pStr);
7094  }
7095 }
7096 
7097 void VmaJsonWriter::ContinueString(const char* pStr)
7098 {
7099  VMA_ASSERT(m_InsideString);
7100 
7101  const size_t strLen = strlen(pStr);
7102  for(size_t i = 0; i < strLen; ++i)
7103  {
7104  char ch = pStr[i];
7105  if(ch == '\\')
7106  {
7107  m_SB.Add("\\\\");
7108  }
7109  else if(ch == '"')
7110  {
7111  m_SB.Add("\\\"");
7112  }
7113  else if(ch >= 32)
7114  {
7115  m_SB.Add(ch);
7116  }
7117  else switch(ch)
7118  {
7119  case '\b':
7120  m_SB.Add("\\b");
7121  break;
7122  case '\f':
7123  m_SB.Add("\\f");
7124  break;
7125  case '\n':
7126  m_SB.Add("\\n");
7127  break;
7128  case '\r':
7129  m_SB.Add("\\r");
7130  break;
7131  case '\t':
7132  m_SB.Add("\\t");
7133  break;
7134  default:
7135  VMA_ASSERT(0 && "Character not currently supported.");
7136  break;
7137  }
7138  }
7139 }
7140 
7141 void VmaJsonWriter::ContinueString(uint32_t n)
7142 {
7143  VMA_ASSERT(m_InsideString);
7144  m_SB.AddNumber(n);
7145 }
7146 
7147 void VmaJsonWriter::ContinueString(uint64_t n)
7148 {
7149  VMA_ASSERT(m_InsideString);
7150  m_SB.AddNumber(n);
7151 }
7152 
7153 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7154 {
7155  VMA_ASSERT(m_InsideString);
7156  m_SB.AddPointer(ptr);
7157 }
7158 
7159 void VmaJsonWriter::EndString(const char* pStr)
7160 {
7161  VMA_ASSERT(m_InsideString);
7162  if(pStr != VMA_NULL && pStr[0] != '\0')
7163  {
7164  ContinueString(pStr);
7165  }
7166  m_SB.Add('"');
7167  m_InsideString = false;
7168 }
7169 
7170 void VmaJsonWriter::WriteNumber(uint32_t n)
7171 {
7172  VMA_ASSERT(!m_InsideString);
7173  BeginValue(false);
7174  m_SB.AddNumber(n);
7175 }
7176 
7177 void VmaJsonWriter::WriteNumber(uint64_t n)
7178 {
7179  VMA_ASSERT(!m_InsideString);
7180  BeginValue(false);
7181  m_SB.AddNumber(n);
7182 }
7183 
7184 void VmaJsonWriter::WriteBool(bool b)
7185 {
7186  VMA_ASSERT(!m_InsideString);
7187  BeginValue(false);
7188  m_SB.Add(b ? "true" : "false");
7189 }
7190 
7191 void VmaJsonWriter::WriteNull()
7192 {
7193  VMA_ASSERT(!m_InsideString);
7194  BeginValue(false);
7195  m_SB.Add("null");
7196 }
7197 
7198 void VmaJsonWriter::BeginValue(bool isString)
7199 {
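  // Editorial note: inside an object, written items alternate key/value.
  // An even valueCount means a key is expected next (must be a string);
  // an odd valueCount means this is the value following a key, so ": " is
  // emitted instead of a comma and line break.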
7200  if(!m_Stack.empty())
7201  {
7202  StackItem& currItem = m_Stack.back();
7203  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7204  currItem.valueCount % 2 == 0)
7205  {
7206  VMA_ASSERT(isString);
7207  }
7208 
7209  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7210  currItem.valueCount % 2 != 0)
7211  {
7212  m_SB.Add(": ");
7213  }
7214  else if(currItem.valueCount > 0)
7215  {
7216  m_SB.Add(", ");
7217  WriteIndent();
7218  }
7219  else
7220  {
7221  WriteIndent();
7222  }
7223  ++currItem.valueCount;
7224  }
7225 }
7226 
7227 void VmaJsonWriter::WriteIndent(bool oneLess)
7228 {
7229  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7230  {
7231  m_SB.AddNewLine();
7232 
7233  size_t count = m_Stack.size();
7234  if(count > 0 && oneLess)
7235  {
7236  --count;
7237  }
7238  for(size_t i = 0; i < count; ++i)
7239  {
7240  m_SB.Add(INDENT);
7241  }
7242  }
7243 }
7244 
7245 #endif // #if VMA_STATS_STRING_ENABLED
7246 
7247 ////////////////////////////////////////////////////////////////////////////////
7248 
7249 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7250 {
7251  if(IsUserDataString())
7252  {
7253  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7254 
7255  FreeUserDataString(hAllocator);
7256 
7257  if(pUserData != VMA_NULL)
7258  {
7259  const char* const newStrSrc = (char*)pUserData;
7260  const size_t newStrLen = strlen(newStrSrc);
7261  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7262  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7263  m_pUserData = newStrDst;
7264  }
7265  }
7266  else
7267  {
7268  m_pUserData = pUserData;
7269  }
7270 }
7271 
7272 void VmaAllocation_T::ChangeBlockAllocation(
7273  VmaAllocator hAllocator,
7274  VmaDeviceMemoryBlock* block,
7275  VkDeviceSize offset)
7276 {
7277  VMA_ASSERT(block != VMA_NULL);
7278  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7279 
7280  // Move mapping reference counter from old block to new block.
7281  if(block != m_BlockAllocation.m_Block)
7282  {
7283  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7284  if(IsPersistentMap())
7285  ++mapRefCount;
7286  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7287  block->Map(hAllocator, mapRefCount, VMA_NULL);
7288  }
7289 
7290  m_BlockAllocation.m_Block = block;
7291  m_BlockAllocation.m_Offset = offset;
7292 }
7293 
7294 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7295 {
7296  VMA_ASSERT(newSize > 0);
7297  m_Size = newSize;
7298 }
7299 
7300 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7301 {
7302  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7303  m_BlockAllocation.m_Offset = newOffset;
7304 }
7305 
7306 VkDeviceSize VmaAllocation_T::GetOffset() const
7307 {
7308  switch(m_Type)
7309  {
7310  case ALLOCATION_TYPE_BLOCK:
7311  return m_BlockAllocation.m_Offset;
7312  case ALLOCATION_TYPE_DEDICATED:
7313  return 0;
7314  default:
7315  VMA_ASSERT(0);
7316  return 0;
7317  }
7318 }
7319 
7320 VkDeviceMemory VmaAllocation_T::GetMemory() const
7321 {
7322  switch(m_Type)
7323  {
7324  case ALLOCATION_TYPE_BLOCK:
7325  return m_BlockAllocation.m_Block->GetDeviceMemory();
7326  case ALLOCATION_TYPE_DEDICATED:
7327  return m_DedicatedAllocation.m_hMemory;
7328  default:
7329  VMA_ASSERT(0);
7330  return VK_NULL_HANDLE;
7331  }
7332 }
7333 
7334 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7335 {
7336  switch(m_Type)
7337  {
7338  case ALLOCATION_TYPE_BLOCK:
7339  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7340  case ALLOCATION_TYPE_DEDICATED:
7341  return m_DedicatedAllocation.m_MemoryTypeIndex;
7342  default:
7343  VMA_ASSERT(0);
7344  return UINT32_MAX;
7345  }
7346 }
7347 
7348 void* VmaAllocation_T::GetMappedData() const
7349 {
7350  switch(m_Type)
7351  {
7352  case ALLOCATION_TYPE_BLOCK:
7353  if(m_MapCount != 0)
7354  {
7355  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7356  VMA_ASSERT(pBlockData != VMA_NULL);
7357  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7358  }
7359  else
7360  {
7361  return VMA_NULL;
7362  }
7363  break;
7364  case ALLOCATION_TYPE_DEDICATED:
7365  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7366  return m_DedicatedAllocation.m_pMappedData;
7367  default:
7368  VMA_ASSERT(0);
7369  return VMA_NULL;
7370  }
7371 }
7372 
7373 bool VmaAllocation_T::CanBecomeLost() const
7374 {
7375  switch(m_Type)
7376  {
7377  case ALLOCATION_TYPE_BLOCK:
7378  return m_BlockAllocation.m_CanBecomeLost;
7379  case ALLOCATION_TYPE_DEDICATED:
7380  return false;
7381  default:
7382  VMA_ASSERT(0);
7383  return false;
7384  }
7385 }
7386 
7387 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7388 {
7389  VMA_ASSERT(CanBecomeLost());
7390 
7391  /*
7392  Warning: This is a carefully designed algorithm.
7393  Do not modify unless you really know what you're doing :)
7394  */
7395  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
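  // Editorial note: CompareExchangeLastUseFrameIndex is a compare-and-swap
  // that reloads localLastUseFrameIndex with the current value on failure,
  // so every iteration re-evaluates the three cases against fresh state.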
7396  for(;;)
7397  {
7398  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7399  {
7400  VMA_ASSERT(0);
7401  return false;
7402  }
7403  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7404  {
7405  return false;
7406  }
7407  else // Last use time earlier than current time.
7408  {
7409  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7410  {
7411  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7412  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7413  return true;
7414  }
7415  }
7416  }
7417 }
7418 
7419 #if VMA_STATS_STRING_ENABLED
7420 
7421 // Names correspond to the values of enum VmaSuballocationType.
7422 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7423  "FREE",
7424  "UNKNOWN",
7425  "BUFFER",
7426  "IMAGE_UNKNOWN",
7427  "IMAGE_LINEAR",
7428  "IMAGE_OPTIMAL",
7429 };
7430 
7431 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7432 {
7433  json.WriteString("Type");
7434  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7435 
7436  json.WriteString("Size");
7437  json.WriteNumber(m_Size);
7438 
7439  if(m_pUserData != VMA_NULL)
7440  {
7441  json.WriteString("UserData");
7442  if(IsUserDataString())
7443  {
7444  json.WriteString((const char*)m_pUserData);
7445  }
7446  else
7447  {
7448  json.BeginString();
7449  json.ContinueString_Pointer(m_pUserData);
7450  json.EndString();
7451  }
7452  }
7453 
7454  json.WriteString("CreationFrameIndex");
7455  json.WriteNumber(m_CreationFrameIndex);
7456 
7457  json.WriteString("LastUseFrameIndex");
7458  json.WriteNumber(GetLastUseFrameIndex());
7459 
7460  if(m_BufferImageUsage != 0)
7461  {
7462  json.WriteString("Usage");
7463  json.WriteNumber(m_BufferImageUsage);
7464  }
7465 }
7466 
7467 #endif
7468 
7469 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7470 {
7471  VMA_ASSERT(IsUserDataString());
7472  if(m_pUserData != VMA_NULL)
7473  {
7474  char* const oldStr = (char*)m_pUserData;
7475  const size_t oldStrLen = strlen(oldStr);
7476  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7477  m_pUserData = VMA_NULL;
7478  }
7479 }
7480 
7481 void VmaAllocation_T::BlockAllocMap()
7482 {
7483  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7484 
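  // Editorial note: the mapping reference count lives in the bits masked by
  // ~MAP_COUNT_FLAG_PERSISTENT_MAP; the flag bit itself marks persistently
  // mapped allocations, hence the 0x7F ceiling.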
7485  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7486  {
7487  ++m_MapCount;
7488  }
7489  else
7490  {
7491  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7492  }
7493 }
7494 
7495 void VmaAllocation_T::BlockAllocUnmap()
7496 {
7497  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7498 
7499  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7500  {
7501  --m_MapCount;
7502  }
7503  else
7504  {
7505  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7506  }
7507 }
7508 
7509 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7510 {
7511  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7512 
7513  if(m_MapCount != 0)
7514  {
7515  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7516  {
7517  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7518  *ppData = m_DedicatedAllocation.m_pMappedData;
7519  ++m_MapCount;
7520  return VK_SUCCESS;
7521  }
7522  else
7523  {
7524  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7525  return VK_ERROR_MEMORY_MAP_FAILED;
7526  }
7527  }
7528  else
7529  {
7530  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7531  hAllocator->m_hDevice,
7532  m_DedicatedAllocation.m_hMemory,
7533  0, // offset
7534  VK_WHOLE_SIZE,
7535  0, // flags
7536  ppData);
7537  if(result == VK_SUCCESS)
7538  {
7539  m_DedicatedAllocation.m_pMappedData = *ppData;
7540  m_MapCount = 1;
7541  }
7542  return result;
7543  }
7544 }
7545 
7546 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7547 {
7548  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7549 
7550  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7551  {
7552  --m_MapCount;
7553  if(m_MapCount == 0)
7554  {
7555  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7556  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7557  hAllocator->m_hDevice,
7558  m_DedicatedAllocation.m_hMemory);
7559  }
7560  }
7561  else
7562  {
7563  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7564  }
7565 }
7566 
7567 #if VMA_STATS_STRING_ENABLED
7568 
7569 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7570 {
7571  json.BeginObject();
7572 
7573  json.WriteString("Blocks");
7574  json.WriteNumber(stat.blockCount);
7575 
7576  json.WriteString("Allocations");
7577  json.WriteNumber(stat.allocationCount);
7578 
7579  json.WriteString("UnusedRanges");
7580  json.WriteNumber(stat.unusedRangeCount);
7581 
7582  json.WriteString("UsedBytes");
7583  json.WriteNumber(stat.usedBytes);
7584 
7585  json.WriteString("UnusedBytes");
7586  json.WriteNumber(stat.unusedBytes);
7587 
7588  if(stat.allocationCount > 1)
7589  {
7590  json.WriteString("AllocationSize");
7591  json.BeginObject(true);
7592  json.WriteString("Min");
7593  json.WriteNumber(stat.allocationSizeMin);
7594  json.WriteString("Avg");
7595  json.WriteNumber(stat.allocationSizeAvg);
7596  json.WriteString("Max");
7597  json.WriteNumber(stat.allocationSizeMax);
7598  json.EndObject();
7599  }
7600 
7601  if(stat.unusedRangeCount > 1)
7602  {
7603  json.WriteString("UnusedRangeSize");
7604  json.BeginObject(true);
7605  json.WriteString("Min");
7606  json.WriteNumber(stat.unusedRangeSizeMin);
7607  json.WriteString("Avg");
7608  json.WriteNumber(stat.unusedRangeSizeAvg);
7609  json.WriteString("Max");
7610  json.WriteNumber(stat.unusedRangeSizeMax);
7611  json.EndObject();
7612  }
7613 
7614  json.EndObject();
7615 }
7616 
7617 #endif // #if VMA_STATS_STRING_ENABLED
7618 
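// Editorial note: comparator for m_FreeSuballocationsBySize. The second
// overload allows binary search by a raw VkDeviceSize key (see
// VmaBinaryFindFirstNotLess in CreateAllocationRequest below).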
7619 struct VmaSuballocationItemSizeLess
7620 {
7621  bool operator()(
7622  const VmaSuballocationList::iterator lhs,
7623  const VmaSuballocationList::iterator rhs) const
7624  {
7625  return lhs->size < rhs->size;
7626  }
7627  bool operator()(
7628  const VmaSuballocationList::iterator lhs,
7629  VkDeviceSize rhsSize) const
7630  {
7631  return lhs->size < rhsSize;
7632  }
7633 };
7634 
7635 
7636 ////////////////////////////////////////////////////////////////////////////////
7637 // class VmaBlockMetadata
7638 
7639 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7640  m_Size(0),
7641  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7642 {
7643 }
7644 
7645 #if VMA_STATS_STRING_ENABLED
7646 
7647 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7648  VkDeviceSize unusedBytes,
7649  size_t allocationCount,
7650  size_t unusedRangeCount) const
7651 {
7652  json.BeginObject();
7653 
7654  json.WriteString("TotalBytes");
7655  json.WriteNumber(GetSize());
7656 
7657  json.WriteString("UnusedBytes");
7658  json.WriteNumber(unusedBytes);
7659 
7660  json.WriteString("Allocations");
7661  json.WriteNumber((uint64_t)allocationCount);
7662 
7663  json.WriteString("UnusedRanges");
7664  json.WriteNumber((uint64_t)unusedRangeCount);
7665 
7666  json.WriteString("Suballocations");
7667  json.BeginArray();
7668 }
7669 
7670 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7671  VkDeviceSize offset,
7672  VmaAllocation hAllocation) const
7673 {
7674  json.BeginObject(true);
7675 
7676  json.WriteString("Offset");
7677  json.WriteNumber(offset);
7678 
7679  hAllocation->PrintParameters(json);
7680 
7681  json.EndObject();
7682 }
7683 
7684 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7685  VkDeviceSize offset,
7686  VkDeviceSize size) const
7687 {
7688  json.BeginObject(true);
7689 
7690  json.WriteString("Offset");
7691  json.WriteNumber(offset);
7692 
7693  json.WriteString("Type");
7694  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7695 
7696  json.WriteString("Size");
7697  json.WriteNumber(size);
7698 
7699  json.EndObject();
7700 }
7701 
7702 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7703 {
7704  json.EndArray();
7705  json.EndObject();
7706 }
7707 
7708 #endif // #if VMA_STATS_STRING_ENABLED
7709 
7710 ////////////////////////////////////////////////////////////////////////////////
7711 // class VmaBlockMetadata_Generic
7712 
7713 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7714  VmaBlockMetadata(hAllocator),
7715  m_FreeCount(0),
7716  m_SumFreeSize(0),
7717  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7718  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7719 {
7720 }
7721 
7722 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7723 {
7724 }
7725 
7726 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7727 {
7728  VmaBlockMetadata::Init(size);
7729 
7730  m_FreeCount = 1;
7731  m_SumFreeSize = size;
7732 
7733  VmaSuballocation suballoc = {};
7734  suballoc.offset = 0;
7735  suballoc.size = size;
7736  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7737  suballoc.hAllocation = VK_NULL_HANDLE;
7738 
7739  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7740  m_Suballocations.push_back(suballoc);
7741  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7742  --suballocItem;
7743  m_FreeSuballocationsBySize.push_back(suballocItem);
7744 }
7745 
7746 bool VmaBlockMetadata_Generic::Validate() const
7747 {
7748  VMA_VALIDATE(!m_Suballocations.empty());
7749 
7750  // Expected offset of new suballocation as calculated from previous ones.
7751  VkDeviceSize calculatedOffset = 0;
7752  // Expected number of free suballocations as calculated from traversing their list.
7753  uint32_t calculatedFreeCount = 0;
7754  // Expected sum size of free suballocations as calculated from traversing their list.
7755  VkDeviceSize calculatedSumFreeSize = 0;
7756  // Expected number of free suballocations that should be registered in
7757  // m_FreeSuballocationsBySize calculated from traversing their list.
7758  size_t freeSuballocationsToRegister = 0;
7760  // True if the previously visited suballocation was free.
7760  bool prevFree = false;
7761 
7762  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7763  suballocItem != m_Suballocations.cend();
7764  ++suballocItem)
7765  {
7766  const VmaSuballocation& subAlloc = *suballocItem;
7767 
7768  // Actual offset of this suballocation doesn't match the expected one.
7769  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7770 
7771  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7772  // Two adjacent free suballocations are invalid. They should be merged.
7773  VMA_VALIDATE(!prevFree || !currFree);
7774 
7775  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7776 
7777  if(currFree)
7778  {
7779  calculatedSumFreeSize += subAlloc.size;
7780  ++calculatedFreeCount;
7781  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7782  {
7783  ++freeSuballocationsToRegister;
7784  }
7785 
7786  // Margin required between allocations - every free space must be at least that large.
7787  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7788  }
7789  else
7790  {
7791  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7792  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7793 
7794  // Margin required between allocations - previous allocation must be free.
7795  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7796  }
7797 
7798  calculatedOffset += subAlloc.size;
7799  prevFree = currFree;
7800  }
7801 
7802  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7803  // match the expected one.
7804  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7805 
7806  VkDeviceSize lastSize = 0;
7807  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7808  {
7809  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7810 
7811  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7812  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7813  // They must be sorted by size ascending.
7814  VMA_VALIDATE(suballocItem->size >= lastSize);
7815 
7816  lastSize = suballocItem->size;
7817  }
7818 
7819  // Check if totals match calculated values.
7820  VMA_VALIDATE(ValidateFreeSuballocationList());
7821  VMA_VALIDATE(calculatedOffset == GetSize());
7822  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7823  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7824 
7825  return true;
7826 }
7827 
7828 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7829 {
7830  if(!m_FreeSuballocationsBySize.empty())
7831  {
7832  return m_FreeSuballocationsBySize.back()->size;
7833  }
7834  else
7835  {
7836  return 0;
7837  }
7838 }
7839 
7840 bool VmaBlockMetadata_Generic::IsEmpty() const
7841 {
7842  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7843 }
7844 
7845 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7846 {
7847  outInfo.blockCount = 1;
7848 
7849  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7850  outInfo.allocationCount = rangeCount - m_FreeCount;
7851  outInfo.unusedRangeCount = m_FreeCount;
7852 
7853  outInfo.unusedBytes = m_SumFreeSize;
7854  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7855 
7856  outInfo.allocationSizeMin = UINT64_MAX;
7857  outInfo.allocationSizeMax = 0;
7858  outInfo.unusedRangeSizeMin = UINT64_MAX;
7859  outInfo.unusedRangeSizeMax = 0;
7860 
7861  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7862  suballocItem != m_Suballocations.cend();
7863  ++suballocItem)
7864  {
7865  const VmaSuballocation& suballoc = *suballocItem;
7866  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7867  {
7868  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7869  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7870  }
7871  else
7872  {
7873  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7874  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7875  }
7876  }
7877 }
7878 
7879 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7880 {
7881  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7882 
7883  inoutStats.size += GetSize();
7884  inoutStats.unusedSize += m_SumFreeSize;
7885  inoutStats.allocationCount += rangeCount - m_FreeCount;
7886  inoutStats.unusedRangeCount += m_FreeCount;
7887  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7888 }
7889 
7890 #if VMA_STATS_STRING_ENABLED
7891 
7892 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7893 {
7894  PrintDetailedMap_Begin(json,
7895  m_SumFreeSize, // unusedBytes
7896  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7897  m_FreeCount); // unusedRangeCount
7898 
7899  size_t i = 0;
7900  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7901  suballocItem != m_Suballocations.cend();
7902  ++suballocItem, ++i)
7903  {
7904  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7905  {
7906  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7907  }
7908  else
7909  {
7910  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7911  }
7912  }
7913 
7914  PrintDetailedMap_End(json);
7915 }
7916 
7917 #endif // #if VMA_STATS_STRING_ENABLED
7918 
7919 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7920  uint32_t currentFrameIndex,
7921  uint32_t frameInUseCount,
7922  VkDeviceSize bufferImageGranularity,
7923  VkDeviceSize allocSize,
7924  VkDeviceSize allocAlignment,
7925  bool upperAddress,
7926  VmaSuballocationType allocType,
7927  bool canMakeOtherLost,
7928  uint32_t strategy,
7929  VmaAllocationRequest* pAllocationRequest)
7930 {
7931  VMA_ASSERT(allocSize > 0);
7932  VMA_ASSERT(!upperAddress);
7933  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7934  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7935  VMA_HEAVY_ASSERT(Validate());
7936 
7937  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7938 
7939  // There is not enough total free space in this block to fulfill the request: early return.
7940  if(canMakeOtherLost == false &&
7941  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7942  {
7943  return false;
7944  }
7945 
7946  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
7947  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7948  if(freeSuballocCount > 0)
7949  {
7950  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7951  {
7952  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7953  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7954  m_FreeSuballocationsBySize.data(),
7955  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7956  allocSize + 2 * VMA_DEBUG_MARGIN,
7957  VmaSuballocationItemSizeLess());
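  // Editorial note: this is a lower-bound search - `it` points to the first
  // registered free suballocation large enough by size alone; the loop below
  // walks forward until one also passes the full CheckAllocation test
  // (alignment, granularity).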
7958  size_t index = it - m_FreeSuballocationsBySize.data();
7959  for(; index < freeSuballocCount; ++index)
7960  {
7961  if(CheckAllocation(
7962  currentFrameIndex,
7963  frameInUseCount,
7964  bufferImageGranularity,
7965  allocSize,
7966  allocAlignment,
7967  allocType,
7968  m_FreeSuballocationsBySize[index],
7969  false, // canMakeOtherLost
7970  &pAllocationRequest->offset,
7971  &pAllocationRequest->itemsToMakeLostCount,
7972  &pAllocationRequest->sumFreeSize,
7973  &pAllocationRequest->sumItemSize))
7974  {
7975  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7976  return true;
7977  }
7978  }
7979  }
7980  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7981  {
7982  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7983  it != m_Suballocations.end();
7984  ++it)
7985  {
7986  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7987  currentFrameIndex,
7988  frameInUseCount,
7989  bufferImageGranularity,
7990  allocSize,
7991  allocAlignment,
7992  allocType,
7993  it,
7994  false, // canMakeOtherLost
7995  &pAllocationRequest->offset,
7996  &pAllocationRequest->itemsToMakeLostCount,
7997  &pAllocationRequest->sumFreeSize,
7998  &pAllocationRequest->sumItemSize))
7999  {
8000  pAllocationRequest->item = it;
8001  return true;
8002  }
8003  }
8004  }
8005  else // WORST_FIT, FIRST_FIT
8006  {
8007  // Search starting from biggest suballocations.
8008  for(size_t index = freeSuballocCount; index--; )
8009  {
8010  if(CheckAllocation(
8011  currentFrameIndex,
8012  frameInUseCount,
8013  bufferImageGranularity,
8014  allocSize,
8015  allocAlignment,
8016  allocType,
8017  m_FreeSuballocationsBySize[index],
8018  false, // canMakeOtherLost
8019  &pAllocationRequest->offset,
8020  &pAllocationRequest->itemsToMakeLostCount,
8021  &pAllocationRequest->sumFreeSize,
8022  &pAllocationRequest->sumItemSize))
8023  {
8024  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8025  return true;
8026  }
8027  }
8028  }
8029  }
8030 
8031  if(canMakeOtherLost)
8032  {
8033  // Brute-force algorithm. TODO: Come up with something better.
8034 
8035  bool found = false;
8036  VmaAllocationRequest tmpAllocRequest = {};
8037  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8038  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8039  suballocIt != m_Suballocations.end();
8040  ++suballocIt)
8041  {
8042  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8043  suballocIt->hAllocation->CanBecomeLost())
8044  {
8045  if(CheckAllocation(
8046  currentFrameIndex,
8047  frameInUseCount,
8048  bufferImageGranularity,
8049  allocSize,
8050  allocAlignment,
8051  allocType,
8052  suballocIt,
8053  canMakeOtherLost,
8054  &tmpAllocRequest.offset,
8055  &tmpAllocRequest.itemsToMakeLostCount,
8056  &tmpAllocRequest.sumFreeSize,
8057  &tmpAllocRequest.sumItemSize))
8058  {
8059  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8060  {
8061  *pAllocationRequest = tmpAllocRequest;
8062  pAllocationRequest->item = suballocIt;
8063  break;
8064  }
8065  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8066  {
8067  *pAllocationRequest = tmpAllocRequest;
8068  pAllocationRequest->item = suballocIt;
8069  found = true;
8070  }
8071  }
8072  }
8073  }
8074 
8075  return found;
8076  }
8077 
8078  return false;
8079 }
8080 
8081 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8082  uint32_t currentFrameIndex,
8083  uint32_t frameInUseCount,
8084  VmaAllocationRequest* pAllocationRequest)
8085 {
8086  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8087 
8088  while(pAllocationRequest->itemsToMakeLostCount > 0)
8089  {
8090  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8091  {
8092  ++pAllocationRequest->item;
8093  }
8094  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8095  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8096  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8097  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8098  {
8099  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8100  --pAllocationRequest->itemsToMakeLostCount;
8101  }
8102  else
8103  {
8104  return false;
8105  }
8106  }
8107 
8108  VMA_HEAVY_ASSERT(Validate());
8109  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8110  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8111 
8112  return true;
8113 }
8114 
8115 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8116 {
8117  uint32_t lostAllocationCount = 0;
8118  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8119  it != m_Suballocations.end();
8120  ++it)
8121  {
8122  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8123  it->hAllocation->CanBecomeLost() &&
8124  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8125  {
8126  it = FreeSuballocation(it);
8127  ++lostAllocationCount;
8128  }
8129  }
8130  return lostAllocationCount;
8131 }
8132 
8133 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8134 {
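  // Editorial note: when corruption detection is enabled, a magic value is
  // written into the VMA_DEBUG_MARGIN before and after every allocation;
  // a failed VmaValidateMagicValue check means something wrote out of bounds.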
8135  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8136  it != m_Suballocations.end();
8137  ++it)
8138  {
8139  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8140  {
8141  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8142  {
8143  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8144  return VK_ERROR_VALIDATION_FAILED_EXT;
8145  }
8146  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8147  {
8148  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8149  return VK_ERROR_VALIDATION_FAILED_EXT;
8150  }
8151  }
8152  }
8153 
8154  return VK_SUCCESS;
8155 }
8156 
8157 void VmaBlockMetadata_Generic::Alloc(
8158  const VmaAllocationRequest& request,
8159  VmaSuballocationType type,
8160  VkDeviceSize allocSize,
8161  VmaAllocation hAllocation)
8162 {
8163  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8164  VMA_ASSERT(request.item != m_Suballocations.end());
8165  VmaSuballocation& suballoc = *request.item;
8166  // Given suballocation is a free block.
8167  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8168  // Given offset is inside this suballocation.
8169  VMA_ASSERT(request.offset >= suballoc.offset);
8170  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8171  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8172  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8173 
8174  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8175  // it to become used.
8176  UnregisterFreeSuballocation(request.item);
8177 
8178  suballoc.offset = request.offset;
8179  suballoc.size = allocSize;
8180  suballoc.type = type;
8181  suballoc.hAllocation = hAllocation;
8182 
8183  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8184  if(paddingEnd)
8185  {
8186  VmaSuballocation paddingSuballoc = {};
8187  paddingSuballoc.offset = request.offset + allocSize;
8188  paddingSuballoc.size = paddingEnd;
8189  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8190  VmaSuballocationList::iterator next = request.item;
8191  ++next;
8192  const VmaSuballocationList::iterator paddingEndItem =
8193  m_Suballocations.insert(next, paddingSuballoc);
8194  RegisterFreeSuballocation(paddingEndItem);
8195  }
8196 
8197  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8198  if(paddingBegin)
8199  {
8200  VmaSuballocation paddingSuballoc = {};
8201  paddingSuballoc.offset = request.offset - paddingBegin;
8202  paddingSuballoc.size = paddingBegin;
8203  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8204  const VmaSuballocationList::iterator paddingBeginItem =
8205  m_Suballocations.insert(request.item, paddingSuballoc);
8206  RegisterFreeSuballocation(paddingBeginItem);
8207  }
8208 
8209  // Update totals.
8210  m_FreeCount = m_FreeCount - 1;
8211  if(paddingBegin > 0)
8212  {
8213  ++m_FreeCount;
8214  }
8215  if(paddingEnd > 0)
8216  {
8217  ++m_FreeCount;
8218  }
8219  m_SumFreeSize -= allocSize;
8220 }
8221 
8222 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8223 {
8224  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8225  suballocItem != m_Suballocations.end();
8226  ++suballocItem)
8227  {
8228  VmaSuballocation& suballoc = *suballocItem;
8229  if(suballoc.hAllocation == allocation)
8230  {
8231  FreeSuballocation(suballocItem);
8232  VMA_HEAVY_ASSERT(Validate());
8233  return;
8234  }
8235  }
8236  VMA_ASSERT(0 && "Not found!");
8237 }
8238 
8239 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8240 {
8241  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8242  suballocItem != m_Suballocations.end();
8243  ++suballocItem)
8244  {
8245  VmaSuballocation& suballoc = *suballocItem;
8246  if(suballoc.offset == offset)
8247  {
8248  FreeSuballocation(suballocItem);
8249  return;
8250  }
8251  }
8252  VMA_ASSERT(0 && "Not found!");
8253 }
8254 
8255 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8256 {
8257  typedef VmaSuballocationList::iterator iter_type;
8258  for(iter_type suballocItem = m_Suballocations.begin();
8259  suballocItem != m_Suballocations.end();
8260  ++suballocItem)
8261  {
8262  VmaSuballocation& suballoc = *suballocItem;
8263  if(suballoc.hAllocation == alloc)
8264  {
8265  iter_type nextItem = suballocItem;
8266  ++nextItem;
8267 
8268  // Should have been ensured at a higher level.
8269  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8270 
8271  // Shrinking.
8272  if(newSize < alloc->GetSize())
8273  {
8274  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8275 
8276  // There is next item.
8277  if(nextItem != m_Suballocations.end())
8278  {
8279  // Next item is free.
8280  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8281  {
8282  // Grow this next item backward.
8283  UnregisterFreeSuballocation(nextItem);
8284  nextItem->offset -= sizeDiff;
8285  nextItem->size += sizeDiff;
8286  RegisterFreeSuballocation(nextItem);
8287  }
8288  // Next item is not free.
8289  else
8290  {
8291  // Create free item after current one.
8292  VmaSuballocation newFreeSuballoc;
8293  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8294  newFreeSuballoc.offset = suballoc.offset + newSize;
8295  newFreeSuballoc.size = sizeDiff;
8296  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8297  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8298  RegisterFreeSuballocation(newFreeSuballocIt);
8299 
8300  ++m_FreeCount;
8301  }
8302  }
8303  // This is the last item.
8304  else
8305  {
8306  // Create free item at the end.
8307  VmaSuballocation newFreeSuballoc;
8308  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8309  newFreeSuballoc.offset = suballoc.offset + newSize;
8310  newFreeSuballoc.size = sizeDiff;
8311  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8312  m_Suballocations.push_back(newFreeSuballoc);
8313 
8314  iter_type newFreeSuballocIt = m_Suballocations.end();
8315  RegisterFreeSuballocation(--newFreeSuballocIt);
8316 
8317  ++m_FreeCount;
8318  }
8319 
8320  suballoc.size = newSize;
8321  m_SumFreeSize += sizeDiff;
8322  }
8323  // Growing.
8324  else
8325  {
8326  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8327 
8328  // There is next item.
8329  if(nextItem != m_Suballocations.end())
8330  {
8331  // Next item is free.
8332  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8333  {
8334  // There is not enough free space, including margin.
8335  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8336  {
8337  return false;
8338  }
8339 
8340  // There is more free space than required.
8341  if(nextItem->size > sizeDiff)
8342  {
8343  // Move and shrink this next item.
8344  UnregisterFreeSuballocation(nextItem);
8345  nextItem->offset += sizeDiff;
8346  nextItem->size -= sizeDiff;
8347  RegisterFreeSuballocation(nextItem);
8348  }
8349  // There is exactly the amount of free space required.
8350  else
8351  {
8352  // Remove this next free item.
8353  UnregisterFreeSuballocation(nextItem);
8354  m_Suballocations.erase(nextItem);
8355  --m_FreeCount;
8356  }
8357  }
8358  // Next item is not free - there is no space to grow.
8359  else
8360  {
8361  return false;
8362  }
8363  }
8364  // This is the last item - there is no space to grow.
8365  else
8366  {
8367  return false;
8368  }
8369 
8370  suballoc.size = newSize;
8371  m_SumFreeSize -= sizeDiff;
8372  }
8373 
8374  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
8375  return true;
8376  }
8377  }
8378  VMA_ASSERT(0 && "Not found!");
8379  return false;
8380 }
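// Editor's illustrative sketch (hypothetical names, not part of the library):
// the same resize-in-place idea on a plain doubly-linked list of ranges.
// Shrinking donates the freed tail to a free successor (or creates one);
// growing steals from the successor only if it is free and large enough.
#include <cstdint>
#include <iterator>
#include <list>

namespace vma_example_resize {

struct Range { uint64_t offset, size; bool free; };

inline bool ResizeInPlace(std::list<Range>& ranges, std::list<Range>::iterator it, uint64_t newSize)
{
    if(newSize == it->size)
    {
        return true;
    }
    std::list<Range>::iterator next = std::next(it);
    if(newSize < it->size)
    {
        // Shrinking: hand the difference to the next free range, or insert one.
        const uint64_t diff = it->size - newSize;
        if(next != ranges.end() && next->free)
        {
            next->offset -= diff;
            next->size += diff;
        }
        else
        {
            ranges.insert(next, Range{it->offset + newSize, diff, true});
        }
    }
    else
    {
        // Growing: the successor must be free and able to absorb the difference.
        const uint64_t diff = newSize - it->size;
        if(next == ranges.end() || !next->free || next->size < diff)
        {
            return false;
        }
        if(next->size > diff)
        {
            next->offset += diff;
            next->size -= diff;
        }
        else
        {
            ranges.erase(next);
        }
    }
    it->size = newSize;
    return true;
}

} // namespace vma_example_resize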
8381 
8382 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8383 {
8384  VkDeviceSize lastSize = 0;
8385  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8386  {
8387  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8388 
8389  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8390  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8391  VMA_VALIDATE(it->size >= lastSize);
8392  lastSize = it->size;
8393  }
8394  return true;
8395 }
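// Editor's illustrative sketch (hypothetical names): the invariant verified
// above, expressed with <algorithm> on a bare vector of free-range sizes that
// must stay sorted ascending and above the registration threshold.
#include <algorithm>
#include <cstdint>
#include <vector>

namespace vma_example_validate {

inline bool IsSizeIndexValid(const std::vector<uint64_t>& freeSizes, uint64_t minRegisterSize)
{
    return std::is_sorted(freeSizes.begin(), freeSizes.end()) &&
        std::all_of(freeSizes.begin(), freeSizes.end(),
            [minRegisterSize](uint64_t s) { return s >= minRegisterSize; });
}

} // namespace vma_example_validate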
8396 
8397 bool VmaBlockMetadata_Generic::CheckAllocation(
8398  uint32_t currentFrameIndex,
8399  uint32_t frameInUseCount,
8400  VkDeviceSize bufferImageGranularity,
8401  VkDeviceSize allocSize,
8402  VkDeviceSize allocAlignment,
8403  VmaSuballocationType allocType,
8404  VmaSuballocationList::const_iterator suballocItem,
8405  bool canMakeOtherLost,
8406  VkDeviceSize* pOffset,
8407  size_t* itemsToMakeLostCount,
8408  VkDeviceSize* pSumFreeSize,
8409  VkDeviceSize* pSumItemSize) const
8410 {
8411  VMA_ASSERT(allocSize > 0);
8412  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8413  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8414  VMA_ASSERT(pOffset != VMA_NULL);
8415 
8416  *itemsToMakeLostCount = 0;
8417  *pSumFreeSize = 0;
8418  *pSumItemSize = 0;
8419 
8420  if(canMakeOtherLost)
8421  {
8422  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8423  {
8424  *pSumFreeSize = suballocItem->size;
8425  }
8426  else
8427  {
8428  if(suballocItem->hAllocation->CanBecomeLost() &&
8429  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8430  {
8431  ++*itemsToMakeLostCount;
8432  *pSumItemSize = suballocItem->size;
8433  }
8434  else
8435  {
8436  return false;
8437  }
8438  }
8439 
8440  // Remaining size is too small for this request: Early return.
8441  if(GetSize() - suballocItem->offset < allocSize)
8442  {
8443  return false;
8444  }
8445 
8446  // Start from offset equal to beginning of this suballocation.
8447  *pOffset = suballocItem->offset;
8448 
8449  // Apply VMA_DEBUG_MARGIN at the beginning.
8450  if(VMA_DEBUG_MARGIN > 0)
8451  {
8452  *pOffset += VMA_DEBUG_MARGIN;
8453  }
8454 
8455  // Apply alignment.
8456  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8457 
8458  // Check previous suballocations for BufferImageGranularity conflicts.
8459  // Make bigger alignment if necessary.
8460  if(bufferImageGranularity > 1)
8461  {
8462  bool bufferImageGranularityConflict = false;
8463  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8464  while(prevSuballocItem != m_Suballocations.cbegin())
8465  {
8466  --prevSuballocItem;
8467  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8468  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8469  {
8470  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8471  {
8472  bufferImageGranularityConflict = true;
8473  break;
8474  }
8475  }
8476  else
8477  // Already on previous page.
8478  break;
8479  }
8480  if(bufferImageGranularityConflict)
8481  {
8482  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8483  }
8484  }
8485 
8486  // Now that we have final *pOffset, check if we are past suballocItem.
8487  // If yes, return false - this function should be called for another suballocItem as starting point.
8488  if(*pOffset >= suballocItem->offset + suballocItem->size)
8489  {
8490  return false;
8491  }
8492 
8493  // Calculate padding at the beginning based on current offset.
8494  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8495 
8496  // Calculate required margin at the end.
8497  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8498 
8499  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8500  // Another early return check.
8501  if(suballocItem->offset + totalSize > GetSize())
8502  {
8503  return false;
8504  }
8505 
8506  // Advance lastSuballocItem until desired size is reached.
8507  // Update itemsToMakeLostCount.
8508  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8509  if(totalSize > suballocItem->size)
8510  {
8511  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8512  while(remainingSize > 0)
8513  {
8514  ++lastSuballocItem;
8515  if(lastSuballocItem == m_Suballocations.cend())
8516  {
8517  return false;
8518  }
8519  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8520  {
8521  *pSumFreeSize += lastSuballocItem->size;
8522  }
8523  else
8524  {
8525  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8526  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8527  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8528  {
8529  ++*itemsToMakeLostCount;
8530  *pSumItemSize += lastSuballocItem->size;
8531  }
8532  else
8533  {
8534  return false;
8535  }
8536  }
8537  remainingSize = (lastSuballocItem->size < remainingSize) ?
8538  remainingSize - lastSuballocItem->size : 0;
8539  }
8540  }
8541 
8542  // Check next suballocations for BufferImageGranularity conflicts.
8543  // If conflict exists, we must mark more allocations lost or fail.
8544  if(bufferImageGranularity > 1)
8545  {
8546  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8547  ++nextSuballocItem;
8548  while(nextSuballocItem != m_Suballocations.cend())
8549  {
8550  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8551  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8552  {
8553  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8554  {
8555  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8556  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8557  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8558  {
8559  ++*itemsToMakeLostCount;
8560  }
8561  else
8562  {
8563  return false;
8564  }
8565  }
8566  }
8567  else
8568  {
8569  // Already on next page.
8570  break;
8571  }
8572  ++nextSuballocItem;
8573  }
8574  }
8575  }
8576  else
8577  {
8578  const VmaSuballocation& suballoc = *suballocItem;
8579  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8580 
8581  *pSumFreeSize = suballoc.size;
8582 
8583  // Size of this suballocation is too small for this request: Early return.
8584  if(suballoc.size < allocSize)
8585  {
8586  return false;
8587  }
8588 
8589  // Start from offset equal to beginning of this suballocation.
8590  *pOffset = suballoc.offset;
8591 
8592  // Apply VMA_DEBUG_MARGIN at the beginning.
8593  if(VMA_DEBUG_MARGIN > 0)
8594  {
8595  *pOffset += VMA_DEBUG_MARGIN;
8596  }
8597 
8598  // Apply alignment.
8599  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8600 
8601  // Check previous suballocations for BufferImageGranularity conflicts.
8602  // Make bigger alignment if necessary.
8603  if(bufferImageGranularity > 1)
8604  {
8605  bool bufferImageGranularityConflict = false;
8606  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8607  while(prevSuballocItem != m_Suballocations.cbegin())
8608  {
8609  --prevSuballocItem;
8610  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8611  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8612  {
8613  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8614  {
8615  bufferImageGranularityConflict = true;
8616  break;
8617  }
8618  }
8619  else
8620  // Already on previous page.
8621  break;
8622  }
8623  if(bufferImageGranularityConflict)
8624  {
8625  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8626  }
8627  }
8628 
8629  // Calculate padding at the beginning based on current offset.
8630  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8631 
8632  // Calculate required margin at the end.
8633  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8634 
8635  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8636  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8637  {
8638  return false;
8639  }
8640 
8641  // Check next suballocations for BufferImageGranularity conflicts.
8642  // If conflict exists, allocation cannot be made here.
8643  if(bufferImageGranularity > 1)
8644  {
8645  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8646  ++nextSuballocItem;
8647  while(nextSuballocItem != m_Suballocations.cend())
8648  {
8649  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8650  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8651  {
8652  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8653  {
8654  return false;
8655  }
8656  }
8657  else
8658  {
8659  // Already on next page.
8660  break;
8661  }
8662  ++nextSuballocItem;
8663  }
8664  }
8665  }
8666 
8667  // All tests passed: Success. pOffset is already filled.
8668  return true;
8669 }
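// Editor's illustrative sketch (hypothetical names): the offset pipeline used
// by both branches above, in isolation - start of the free range, plus debug
// margin, aligned up, then bumped to the granularity when the previous
// resource shares its page and the resource types conflict.
#include <cstdint>

namespace vma_example_offset {

inline uint64_t AlignUp(uint64_t v, uint64_t a) { return (v + a - 1) / a * a; }

// True when the end of resource A and the start of resource B fall on the same
// bufferImageGranularity-sized page.
inline bool OnSamePage(uint64_t offsetA, uint64_t sizeA, uint64_t offsetB, uint64_t pageSize)
{
    return (offsetA + sizeA - 1) / pageSize == offsetB / pageSize;
}

inline uint64_t ComputeOffset(uint64_t freeRangeStart, uint64_t debugMargin,
    uint64_t alignment, uint64_t granularity, bool prevOnSamePageConflicts)
{
    uint64_t offset = AlignUp(freeRangeStart + debugMargin, alignment);
    if(granularity > 1 && prevOnSamePageConflicts)
    {
        offset = AlignUp(offset, granularity);
    }
    return offset;
}

} // namespace vma_example_offset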
8670 
8671 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8672 {
8673  VMA_ASSERT(item != m_Suballocations.end());
8674  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8675 
8676  VmaSuballocationList::iterator nextItem = item;
8677  ++nextItem;
8678  VMA_ASSERT(nextItem != m_Suballocations.end());
8679  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8680 
8681  item->size += nextItem->size;
8682  --m_FreeCount;
8683  m_Suballocations.erase(nextItem);
8684 }
8685 
8686 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8687 {
8688  // Change this suballocation to be marked as free.
8689  VmaSuballocation& suballoc = *suballocItem;
8690  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8691  suballoc.hAllocation = VK_NULL_HANDLE;
8692 
8693  // Update totals.
8694  ++m_FreeCount;
8695  m_SumFreeSize += suballoc.size;
8696 
8697  // Merge with previous and/or next suballocation if it's also free.
8698  bool mergeWithNext = false;
8699  bool mergeWithPrev = false;
8700 
8701  VmaSuballocationList::iterator nextItem = suballocItem;
8702  ++nextItem;
8703  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8704  {
8705  mergeWithNext = true;
8706  }
8707 
8708  VmaSuballocationList::iterator prevItem = suballocItem;
8709  if(suballocItem != m_Suballocations.begin())
8710  {
8711  --prevItem;
8712  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8713  {
8714  mergeWithPrev = true;
8715  }
8716  }
8717 
8718  if(mergeWithNext)
8719  {
8720  UnregisterFreeSuballocation(nextItem);
8721  MergeFreeWithNext(suballocItem);
8722  }
8723 
8724  if(mergeWithPrev)
8725  {
8726  UnregisterFreeSuballocation(prevItem);
8727  MergeFreeWithNext(prevItem);
8728  RegisterFreeSuballocation(prevItem);
8729  return prevItem;
8730  }
8731  else
8732  {
8733  RegisterFreeSuballocation(suballocItem);
8734  return suballocItem;
8735  }
8736 }
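// Editor's illustrative sketch (hypothetical names): the free-and-coalesce
// step above on a plain list - mark the range free, absorb a free successor,
// then let a free predecessor absorb the result, so no two free ranges are
// ever adjacent.
#include <cstdint>
#include <iterator>
#include <list>

namespace vma_example_free {

struct Range { uint64_t offset, size; bool free; };

inline std::list<Range>::iterator FreeAndCoalesce(std::list<Range>& ranges, std::list<Range>::iterator it)
{
    it->free = true;
    std::list<Range>::iterator next = std::next(it);
    if(next != ranges.end() && next->free)
    {
        it->size += next->size;
        ranges.erase(next);
    }
    if(it != ranges.begin())
    {
        std::list<Range>::iterator prev = std::prev(it);
        if(prev->free)
        {
            prev->size += it->size;
            ranges.erase(it);
            return prev;
        }
    }
    return it;
}

} // namespace vma_example_free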
8737 
8738 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8739 {
8740  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8741  VMA_ASSERT(item->size > 0);
8742 
8743  // You may want to enable this validation at the beginning or at the end of
8744  // this function, depending on what you want to check.
8745  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8746 
8747  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8748  {
8749  if(m_FreeSuballocationsBySize.empty())
8750  {
8751  m_FreeSuballocationsBySize.push_back(item);
8752  }
8753  else
8754  {
8755  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8756  }
8757  }
8758 
8759  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8760 }
8761 
8762 
8763 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8764 {
8765  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8766  VMA_ASSERT(item->size > 0);
8767 
8768  // You may want to enable this validation at the beginning or at the end of
8769  // this function, depending on what you want to check.
8770  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8771 
8772  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8773  {
8774  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8775  m_FreeSuballocationsBySize.data(),
8776  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8777  item,
8778  VmaSuballocationItemSizeLess());
8779  for(size_t index = it - m_FreeSuballocationsBySize.data();
8780  index < m_FreeSuballocationsBySize.size();
8781  ++index)
8782  {
8783  if(m_FreeSuballocationsBySize[index] == item)
8784  {
8785  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8786  return;
8787  }
8788  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8789  }
8790  VMA_ASSERT(0 && "Not found.");
8791  }
8792 
8793  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8794 }
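// Editor's illustrative sketch (hypothetical names): the by-size index kept by
// the two functions above, modeled as a vector of (size, id) pairs sorted by
// size via std::lower_bound. Unregistering scans forward through equal sizes
// because several free ranges may share one size.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

namespace vma_example_index {

typedef std::pair<uint64_t, int> Entry; // (size, id)

inline bool EntrySizeLess(const Entry& e, uint64_t size) { return e.first < size; }

inline void Register(std::vector<Entry>& index, uint64_t size, int id)
{
    index.insert(
        std::lower_bound(index.begin(), index.end(), size, EntrySizeLess),
        Entry(size, id));
}

inline void Unregister(std::vector<Entry>& index, uint64_t size, int id)
{
    for(std::vector<Entry>::iterator it =
            std::lower_bound(index.begin(), index.end(), size, EntrySizeLess);
        it != index.end() && it->first == size;
        ++it)
    {
        if(it->second == id)
        {
            index.erase(it);
            return;
        }
    }
    assert(0 && "Not found.");
}

} // namespace vma_example_index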
8795 
8796 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8797  VkDeviceSize bufferImageGranularity,
8798  VmaSuballocationType& inOutPrevSuballocType) const
8799 {
8800  if(bufferImageGranularity == 1 || IsEmpty())
8801  {
8802  return false;
8803  }
8804 
8805  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8806  bool typeConflictFound = false;
8807  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8808  it != m_Suballocations.cend();
8809  ++it)
8810  {
8811  const VmaSuballocationType suballocType = it->type;
8812  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8813  {
8814  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8815  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8816  {
8817  typeConflictFound = true;
8818  }
8819  inOutPrevSuballocType = suballocType;
8820  }
8821  }
8822 
8823  return typeConflictFound || minAlignment >= bufferImageGranularity;
8824 }
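// Editor's illustrative sketch (hypothetical rule and names, not the library's
// exact table): the symmetric type-pair test the heuristic above relies on.
// Only a "linear" resource (buffer or linearly tiled image) next to a
// "non-linear" (optimally tiled) one on the same granularity page counts as a
// conflict; unknown types are treated conservatively.
#include <algorithm>

namespace vma_example_granularity {

enum ResType { RES_FREE, RES_BUFFER, RES_IMAGE_LINEAR, RES_IMAGE_OPTIMAL, RES_UNKNOWN };

inline bool IsGranularityConflict(ResType a, ResType b)
{
    if(a > b)
    {
        std::swap(a, b); // The relation is symmetric; order the pair once.
    }
    const bool aLinearOrUnknown = (a == RES_BUFFER || a == RES_IMAGE_LINEAR || a == RES_UNKNOWN);
    const bool bOptimalOrUnknown = (b == RES_IMAGE_OPTIMAL || b == RES_UNKNOWN);
    return aLinearOrUnknown && bOptimalOrUnknown;
}

} // namespace vma_example_granularity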
8825 
8826 ////////////////////////////////////////////////////////////////////////////////
8827 // class VmaBlockMetadata_Linear
8828 
8829 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8830  VmaBlockMetadata(hAllocator),
8831  m_SumFreeSize(0),
8832  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8833  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8834  m_1stVectorIndex(0),
8835  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8836  m_1stNullItemsBeginCount(0),
8837  m_1stNullItemsMiddleCount(0),
8838  m_2ndNullItemsCount(0)
8839 {
8840 }
8841 
8842 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8843 {
8844 }
8845 
8846 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8847 {
8848  VmaBlockMetadata::Init(size);
8849  m_SumFreeSize = size;
8850 }
8851 
8852 bool VmaBlockMetadata_Linear::Validate() const
8853 {
8854  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8855  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8856 
8857  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8858  VMA_VALIDATE(!suballocations1st.empty() ||
8859  suballocations2nd.empty() ||
8860  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8861 
8862  if(!suballocations1st.empty())
8863  {
8864  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
8865  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8866  // Null item at the end should be just pop_back().
8867  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8868  }
8869  if(!suballocations2nd.empty())
8870  {
8871  // Null item at the end should be just pop_back().
8872  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8873  }
8874 
8875  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8876  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8877 
8878  VkDeviceSize sumUsedSize = 0;
8879  const size_t suballoc1stCount = suballocations1st.size();
8880  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8881 
8882  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8883  {
8884  const size_t suballoc2ndCount = suballocations2nd.size();
8885  size_t nullItem2ndCount = 0;
8886  for(size_t i = 0; i < suballoc2ndCount; ++i)
8887  {
8888  const VmaSuballocation& suballoc = suballocations2nd[i];
8889  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8890 
8891  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8892  VMA_VALIDATE(suballoc.offset >= offset);
8893 
8894  if(!currFree)
8895  {
8896  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8897  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8898  sumUsedSize += suballoc.size;
8899  }
8900  else
8901  {
8902  ++nullItem2ndCount;
8903  }
8904 
8905  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8906  }
8907 
8908  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8909  }
8910 
8911  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8912  {
8913  const VmaSuballocation& suballoc = suballocations1st[i];
8914  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8915  suballoc.hAllocation == VK_NULL_HANDLE);
8916  }
8917 
8918  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8919 
8920  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8921  {
8922  const VmaSuballocation& suballoc = suballocations1st[i];
8923  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8924 
8925  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8926  VMA_VALIDATE(suballoc.offset >= offset);
8927  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8928 
8929  if(!currFree)
8930  {
8931  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8932  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8933  sumUsedSize += suballoc.size;
8934  }
8935  else
8936  {
8937  ++nullItem1stCount;
8938  }
8939 
8940  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8941  }
8942  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8943 
8944  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8945  {
8946  const size_t suballoc2ndCount = suballocations2nd.size();
8947  size_t nullItem2ndCount = 0;
8948  for(size_t i = suballoc2ndCount; i--; )
8949  {
8950  const VmaSuballocation& suballoc = suballocations2nd[i];
8951  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8952 
8953  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8954  VMA_VALIDATE(suballoc.offset >= offset);
8955 
8956  if(!currFree)
8957  {
8958  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8959  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8960  sumUsedSize += suballoc.size;
8961  }
8962  else
8963  {
8964  ++nullItem2ndCount;
8965  }
8966 
8967  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8968  }
8969 
8970  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8971  }
8972 
8973  VMA_VALIDATE(offset <= GetSize());
8974  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8975 
8976  return true;
8977 }
8978 
8979 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8980 {
8981  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8982  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8983 }
8984 
8985 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8986 {
8987  const VkDeviceSize size = GetSize();
8988 
8989  /*
8990  We don't consider gaps inside allocation vectors with freed allocations because
8991  they are not suitable for reuse in linear allocator. We consider only space that
8992  is available for new allocations.
8993  */
8994  if(IsEmpty())
8995  {
8996  return size;
8997  }
8998 
8999  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9000 
9001  switch(m_2ndVectorMode)
9002  {
9003  case SECOND_VECTOR_EMPTY:
9004  /*
9005  Available space is after end of 1st, as well as before beginning of 1st (which
9006  would make it a ring buffer).
9007  */
9008  {
9009  const size_t suballocations1stCount = suballocations1st.size();
9010  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9011  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9012  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9013  return VMA_MAX(
9014  firstSuballoc.offset,
9015  size - (lastSuballoc.offset + lastSuballoc.size));
9016  }
9017  break;
9018 
9019  case SECOND_VECTOR_RING_BUFFER:
9020  /*
9021  Available space is only between end of 2nd and beginning of 1st.
9022  */
9023  {
9024  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9025  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9026  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9027  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9028  }
9029  break;
9030 
9031  case SECOND_VECTOR_DOUBLE_STACK:
9032  /*
9033  Available space is only between end of 1st and top of 2nd.
9034  */
9035  {
9036  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9037  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9038  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9039  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9040  }
9041  break;
9042 
9043  default:
9044  VMA_ASSERT(0);
9045  return 0;
9046  }
9047 }
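// Editor's illustrative sketch (hypothetical names): the three cases above on
// bare numbers. firstOffset/lastEnd bound the 1st vector, secondEnd/topOffset
// the 2nd, all within a block of blockSize bytes.
#include <algorithm>
#include <cstdint>

namespace vma_example_linear_free {

// 2nd vector empty: space before the 1st vector or after its end.
inline uint64_t MaxFreeWhenSecondEmpty(uint64_t firstOffset, uint64_t lastEnd, uint64_t blockSize)
{
    return std::max(firstOffset, blockSize - lastEnd);
}

// Ring buffer: only the gap between the end of 2nd and the start of 1st.
inline uint64_t MaxFreeRingBuffer(uint64_t secondEnd, uint64_t firstOffset)
{
    return firstOffset - secondEnd;
}

// Double stack: only the gap between the end of 1st and the top of 2nd.
inline uint64_t MaxFreeDoubleStack(uint64_t firstEnd, uint64_t topOffset)
{
    return topOffset - firstEnd;
}

} // namespace vma_example_linear_free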
9048 
9049 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9050 {
9051  const VkDeviceSize size = GetSize();
9052  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9053  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9054  const size_t suballoc1stCount = suballocations1st.size();
9055  const size_t suballoc2ndCount = suballocations2nd.size();
9056 
9057  outInfo.blockCount = 1;
9058  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9059  outInfo.unusedRangeCount = 0;
9060  outInfo.usedBytes = 0;
9061  outInfo.allocationSizeMin = UINT64_MAX;
9062  outInfo.allocationSizeMax = 0;
9063  outInfo.unusedRangeSizeMin = UINT64_MAX;
9064  outInfo.unusedRangeSizeMax = 0;
9065 
9066  VkDeviceSize lastOffset = 0;
9067 
9068  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9069  {
9070  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9071  size_t nextAlloc2ndIndex = 0;
9072  while(lastOffset < freeSpace2ndTo1stEnd)
9073  {
9074  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9075  while(nextAlloc2ndIndex < suballoc2ndCount &&
9076  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9077  {
9078  ++nextAlloc2ndIndex;
9079  }
9080 
9081  // Found non-null allocation.
9082  if(nextAlloc2ndIndex < suballoc2ndCount)
9083  {
9084  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9085 
9086  // 1. Process free space before this allocation.
9087  if(lastOffset < suballoc.offset)
9088  {
9089  // There is free space from lastOffset to suballoc.offset.
9090  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9091  ++outInfo.unusedRangeCount;
9092  outInfo.unusedBytes += unusedRangeSize;
9093  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9094  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9095  }
9096 
9097  // 2. Process this allocation.
9098  // There is allocation with suballoc.offset, suballoc.size.
9099  outInfo.usedBytes += suballoc.size;
9100  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9101  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9102 
9103  // 3. Prepare for next iteration.
9104  lastOffset = suballoc.offset + suballoc.size;
9105  ++nextAlloc2ndIndex;
9106  }
9107  // We are at the end.
9108  else
9109  {
9110  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9111  if(lastOffset < freeSpace2ndTo1stEnd)
9112  {
9113  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9114  ++outInfo.unusedRangeCount;
9115  outInfo.unusedBytes += unusedRangeSize;
9116  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9117  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9118  }
9119 
9120  // End of loop.
9121  lastOffset = freeSpace2ndTo1stEnd;
9122  }
9123  }
9124  }
9125 
9126  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9127  const VkDeviceSize freeSpace1stTo2ndEnd =
9128  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9129  while(lastOffset < freeSpace1stTo2ndEnd)
9130  {
9131  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9132  while(nextAlloc1stIndex < suballoc1stCount &&
9133  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9134  {
9135  ++nextAlloc1stIndex;
9136  }
9137 
9138  // Found non-null allocation.
9139  if(nextAlloc1stIndex < suballoc1stCount)
9140  {
9141  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9142 
9143  // 1. Process free space before this allocation.
9144  if(lastOffset < suballoc.offset)
9145  {
9146  // There is free space from lastOffset to suballoc.offset.
9147  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9148  ++outInfo.unusedRangeCount;
9149  outInfo.unusedBytes += unusedRangeSize;
9150  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9151  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9152  }
9153 
9154  // 2. Process this allocation.
9155  // There is allocation with suballoc.offset, suballoc.size.
9156  outInfo.usedBytes += suballoc.size;
9157  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9158  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9159 
9160  // 3. Prepare for next iteration.
9161  lastOffset = suballoc.offset + suballoc.size;
9162  ++nextAlloc1stIndex;
9163  }
9164  // We are at the end.
9165  else
9166  {
9167  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9168  if(lastOffset < freeSpace1stTo2ndEnd)
9169  {
9170  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9171  ++outInfo.unusedRangeCount;
9172  outInfo.unusedBytes += unusedRangeSize;
9173  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9174  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9175  }
9176 
9177  // End of loop.
9178  lastOffset = freeSpace1stTo2ndEnd;
9179  }
9180  }
9181 
9182  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9183  {
9184  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9185  while(lastOffset < size)
9186  {
9187  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9188  while(nextAlloc2ndIndex != SIZE_MAX &&
9189  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9190  {
9191  --nextAlloc2ndIndex;
9192  }
9193 
9194  // Found non-null allocation.
9195  if(nextAlloc2ndIndex != SIZE_MAX)
9196  {
9197  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9198 
9199  // 1. Process free space before this allocation.
9200  if(lastOffset < suballoc.offset)
9201  {
9202  // There is free space from lastOffset to suballoc.offset.
9203  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9204  ++outInfo.unusedRangeCount;
9205  outInfo.unusedBytes += unusedRangeSize;
9206  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9207  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9208  }
9209 
9210  // 2. Process this allocation.
9211  // There is allocation with suballoc.offset, suballoc.size.
9212  outInfo.usedBytes += suballoc.size;
9213  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9214  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9215 
9216  // 3. Prepare for next iteration.
9217  lastOffset = suballoc.offset + suballoc.size;
9218  --nextAlloc2ndIndex;
9219  }
9220  // We are at the end.
9221  else
9222  {
9223  // There is free space from lastOffset to size.
9224  if(lastOffset < size)
9225  {
9226  const VkDeviceSize unusedRangeSize = size - lastOffset;
9227  ++outInfo.unusedRangeCount;
9228  outInfo.unusedBytes += unusedRangeSize;
9229  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9230  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9231  }
9232 
9233  // End of loop.
9234  lastOffset = size;
9235  }
9236  }
9237  }
9238 
9239  outInfo.unusedBytes = size - outInfo.usedBytes;
9240 }
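// Editor's illustrative sketch (hypothetical names): the min/max accumulation
// pattern used above - min fields start at the UINT64_MAX sentinel and only
// shrink, max fields start at 0 and only grow, so a single pass needs no
// first-element special case.
#include <algorithm>
#include <cstdint>
#include <vector>

namespace vma_example_stats {

struct MinMax
{
    uint64_t minSize;
    uint64_t maxSize;
};

inline MinMax AccumulateSizes(const std::vector<uint64_t>& sizes)
{
    MinMax r = { UINT64_MAX, 0 };
    for(size_t i = 0; i < sizes.size(); ++i)
    {
        r.minSize = std::min(r.minSize, sizes[i]);
        r.maxSize = std::max(r.maxSize, sizes[i]);
    }
    return r;
}

} // namespace vma_example_stats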
9241 
9242 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9243 {
9244  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9245  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9246  const VkDeviceSize size = GetSize();
9247  const size_t suballoc1stCount = suballocations1st.size();
9248  const size_t suballoc2ndCount = suballocations2nd.size();
9249 
9250  inoutStats.size += size;
9251 
9252  VkDeviceSize lastOffset = 0;
9253 
9254  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9255  {
9256  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9257  size_t nextAlloc2ndIndex = 0;
9258  while(lastOffset < freeSpace2ndTo1stEnd)
9259  {
9260  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9261  while(nextAlloc2ndIndex < suballoc2ndCount &&
9262  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9263  {
9264  ++nextAlloc2ndIndex;
9265  }
9266 
9267  // Found non-null allocation.
9268  if(nextAlloc2ndIndex < suballoc2ndCount)
9269  {
9270  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9271 
9272  // 1. Process free space before this allocation.
9273  if(lastOffset < suballoc.offset)
9274  {
9275  // There is free space from lastOffset to suballoc.offset.
9276  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9277  inoutStats.unusedSize += unusedRangeSize;
9278  ++inoutStats.unusedRangeCount;
9279  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9280  }
9281 
9282  // 2. Process this allocation.
9283  // There is allocation with suballoc.offset, suballoc.size.
9284  ++inoutStats.allocationCount;
9285 
9286  // 3. Prepare for next iteration.
9287  lastOffset = suballoc.offset + suballoc.size;
9288  ++nextAlloc2ndIndex;
9289  }
9290  // We are at the end.
9291  else
9292  {
9293  if(lastOffset < freeSpace2ndTo1stEnd)
9294  {
9295  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9296  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9297  inoutStats.unusedSize += unusedRangeSize;
9298  ++inoutStats.unusedRangeCount;
9299  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9300  }
9301 
9302  // End of loop.
9303  lastOffset = freeSpace2ndTo1stEnd;
9304  }
9305  }
9306  }
9307 
9308  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9309  const VkDeviceSize freeSpace1stTo2ndEnd =
9310  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9311  while(lastOffset < freeSpace1stTo2ndEnd)
9312  {
9313  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9314  while(nextAlloc1stIndex < suballoc1stCount &&
9315  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9316  {
9317  ++nextAlloc1stIndex;
9318  }
9319 
9320  // Found non-null allocation.
9321  if(nextAlloc1stIndex < suballoc1stCount)
9322  {
9323  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9324 
9325  // 1. Process free space before this allocation.
9326  if(lastOffset < suballoc.offset)
9327  {
9328  // There is free space from lastOffset to suballoc.offset.
9329  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9330  inoutStats.unusedSize += unusedRangeSize;
9331  ++inoutStats.unusedRangeCount;
9332  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9333  }
9334 
9335  // 2. Process this allocation.
9336  // There is allocation with suballoc.offset, suballoc.size.
9337  ++inoutStats.allocationCount;
9338 
9339  // 3. Prepare for next iteration.
9340  lastOffset = suballoc.offset + suballoc.size;
9341  ++nextAlloc1stIndex;
9342  }
9343  // We are at the end.
9344  else
9345  {
9346  if(lastOffset < freeSpace1stTo2ndEnd)
9347  {
9348  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9349  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9350  inoutStats.unusedSize += unusedRangeSize;
9351  ++inoutStats.unusedRangeCount;
9352  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9353  }
9354 
9355  // End of loop.
9356  lastOffset = freeSpace1stTo2ndEnd;
9357  }
9358  }
9359 
9360  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9361  {
9362  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9363  while(lastOffset < size)
9364  {
9365  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9366  while(nextAlloc2ndIndex != SIZE_MAX &&
9367  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9368  {
9369  --nextAlloc2ndIndex;
9370  }
9371 
9372  // Found non-null allocation.
9373  if(nextAlloc2ndIndex != SIZE_MAX)
9374  {
9375  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9376 
9377  // 1. Process free space before this allocation.
9378  if(lastOffset < suballoc.offset)
9379  {
9380  // There is free space from lastOffset to suballoc.offset.
9381  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9382  inoutStats.unusedSize += unusedRangeSize;
9383  ++inoutStats.unusedRangeCount;
9384  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9385  }
9386 
9387  // 2. Process this allocation.
9388  // There is allocation with suballoc.offset, suballoc.size.
9389  ++inoutStats.allocationCount;
9390 
9391  // 3. Prepare for next iteration.
9392  lastOffset = suballoc.offset + suballoc.size;
9393  --nextAlloc2ndIndex;
9394  }
9395  // We are at the end.
9396  else
9397  {
9398  if(lastOffset < size)
9399  {
9400  // There is free space from lastOffset to size.
9401  const VkDeviceSize unusedRangeSize = size - lastOffset;
9402  inoutStats.unusedSize += unusedRangeSize;
9403  ++inoutStats.unusedRangeCount;
9404  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9405  }
9406 
9407  // End of loop.
9408  lastOffset = size;
9409  }
9410  }
9411  }
9412 }
9413 
9414 #if VMA_STATS_STRING_ENABLED
9415 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9416 {
9417  const VkDeviceSize size = GetSize();
9418  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9419  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9420  const size_t suballoc1stCount = suballocations1st.size();
9421  const size_t suballoc2ndCount = suballocations2nd.size();
9422 
9423  // FIRST PASS
9424 
9425  size_t unusedRangeCount = 0;
9426  VkDeviceSize usedBytes = 0;
9427 
9428  VkDeviceSize lastOffset = 0;
9429 
9430  size_t alloc2ndCount = 0;
9431  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9432  {
9433  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9434  size_t nextAlloc2ndIndex = 0;
9435  while(lastOffset < freeSpace2ndTo1stEnd)
9436  {
9437  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9438  while(nextAlloc2ndIndex < suballoc2ndCount &&
9439  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9440  {
9441  ++nextAlloc2ndIndex;
9442  }
9443 
9444  // Found non-null allocation.
9445  if(nextAlloc2ndIndex < suballoc2ndCount)
9446  {
9447  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9448 
9449  // 1. Process free space before this allocation.
9450  if(lastOffset < suballoc.offset)
9451  {
9452  // There is free space from lastOffset to suballoc.offset.
9453  ++unusedRangeCount;
9454  }
9455 
9456  // 2. Process this allocation.
9457  // There is allocation with suballoc.offset, suballoc.size.
9458  ++alloc2ndCount;
9459  usedBytes += suballoc.size;
9460 
9461  // 3. Prepare for next iteration.
9462  lastOffset = suballoc.offset + suballoc.size;
9463  ++nextAlloc2ndIndex;
9464  }
9465  // We are at the end.
9466  else
9467  {
9468  if(lastOffset < freeSpace2ndTo1stEnd)
9469  {
9470  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9471  ++unusedRangeCount;
9472  }
9473 
9474  // End of loop.
9475  lastOffset = freeSpace2ndTo1stEnd;
9476  }
9477  }
9478  }
9479 
9480  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9481  size_t alloc1stCount = 0;
9482  const VkDeviceSize freeSpace1stTo2ndEnd =
9483  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9484  while(lastOffset < freeSpace1stTo2ndEnd)
9485  {
9486  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9487  while(nextAlloc1stIndex < suballoc1stCount &&
9488  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9489  {
9490  ++nextAlloc1stIndex;
9491  }
9492 
9493  // Found non-null allocation.
9494  if(nextAlloc1stIndex < suballoc1stCount)
9495  {
9496  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9497 
9498  // 1. Process free space before this allocation.
9499  if(lastOffset < suballoc.offset)
9500  {
9501  // There is free space from lastOffset to suballoc.offset.
9502  ++unusedRangeCount;
9503  }
9504 
9505  // 2. Process this allocation.
9506  // There is allocation with suballoc.offset, suballoc.size.
9507  ++alloc1stCount;
9508  usedBytes += suballoc.size;
9509 
9510  // 3. Prepare for next iteration.
9511  lastOffset = suballoc.offset + suballoc.size;
9512  ++nextAlloc1stIndex;
9513  }
9514  // We are at the end.
9515  else
9516  {
9517  if(lastOffset < freeSpace1stTo2ndEnd)
9518  {
9519  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9520  ++unusedRangeCount;
9521  }
9522 
9523  // End of loop.
9524  lastOffset = freeSpace1stTo2ndEnd;
9525  }
9526  }
9527 
9528  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9529  {
9530  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9531  while(lastOffset < size)
9532  {
9533  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9534  while(nextAlloc2ndIndex != SIZE_MAX &&
9535  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9536  {
9537  --nextAlloc2ndIndex;
9538  }
9539 
9540  // Found non-null allocation.
9541  if(nextAlloc2ndIndex != SIZE_MAX)
9542  {
9543  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9544 
9545  // 1. Process free space before this allocation.
9546  if(lastOffset < suballoc.offset)
9547  {
9548  // There is free space from lastOffset to suballoc.offset.
9549  ++unusedRangeCount;
9550  }
9551 
9552  // 2. Process this allocation.
9553  // There is allocation with suballoc.offset, suballoc.size.
9554  ++alloc2ndCount;
9555  usedBytes += suballoc.size;
9556 
9557  // 3. Prepare for next iteration.
9558  lastOffset = suballoc.offset + suballoc.size;
9559  --nextAlloc2ndIndex;
9560  }
9561  // We are at the end.
9562  else
9563  {
9564  if(lastOffset < size)
9565  {
9566  // There is free space from lastOffset to size.
9567  ++unusedRangeCount;
9568  }
9569 
9570  // End of loop.
9571  lastOffset = size;
9572  }
9573  }
9574  }
9575 
9576  const VkDeviceSize unusedBytes = size - usedBytes;
9577  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9578 
9579  // SECOND PASS
9580  lastOffset = 0;
9581 
9582  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9583  {
9584  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9585  size_t nextAlloc2ndIndex = 0;
9586  while(lastOffset < freeSpace2ndTo1stEnd)
9587  {
9588  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9589  while(nextAlloc2ndIndex < suballoc2ndCount &&
9590  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9591  {
9592  ++nextAlloc2ndIndex;
9593  }
9594 
9595  // Found non-null allocation.
9596  if(nextAlloc2ndIndex < suballoc2ndCount)
9597  {
9598  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9599 
9600  // 1. Process free space before this allocation.
9601  if(lastOffset < suballoc.offset)
9602  {
9603  // There is free space from lastOffset to suballoc.offset.
9604  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9605  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9606  }
9607 
9608  // 2. Process this allocation.
9609  // There is allocation with suballoc.offset, suballoc.size.
9610  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9611 
9612  // 3. Prepare for next iteration.
9613  lastOffset = suballoc.offset + suballoc.size;
9614  ++nextAlloc2ndIndex;
9615  }
9616  // We are at the end.
9617  else
9618  {
9619  if(lastOffset < freeSpace2ndTo1stEnd)
9620  {
9621  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9622  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9623  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9624  }
9625 
9626  // End of loop.
9627  lastOffset = freeSpace2ndTo1stEnd;
9628  }
9629  }
9630  }
9631 
9632  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9633  while(lastOffset < freeSpace1stTo2ndEnd)
9634  {
9635  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9636  while(nextAlloc1stIndex < suballoc1stCount &&
9637  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9638  {
9639  ++nextAlloc1stIndex;
9640  }
9641 
9642  // Found non-null allocation.
9643  if(nextAlloc1stIndex < suballoc1stCount)
9644  {
9645  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9646 
9647  // 1. Process free space before this allocation.
9648  if(lastOffset < suballoc.offset)
9649  {
9650  // There is free space from lastOffset to suballoc.offset.
9651  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9652  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9653  }
9654 
9655  // 2. Process this allocation.
9656  // There is allocation with suballoc.offset, suballoc.size.
9657  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9658 
9659  // 3. Prepare for next iteration.
9660  lastOffset = suballoc.offset + suballoc.size;
9661  ++nextAlloc1stIndex;
9662  }
9663  // We are at the end.
9664  else
9665  {
9666  if(lastOffset < freeSpace1stTo2ndEnd)
9667  {
9668  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9669  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9670  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9671  }
9672 
9673  // End of loop.
9674  lastOffset = freeSpace1stTo2ndEnd;
9675  }
9676  }
9677 
9678  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9679  {
9680  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9681  while(lastOffset < size)
9682  {
9683  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9684  while(nextAlloc2ndIndex != SIZE_MAX &&
9685  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9686  {
9687  --nextAlloc2ndIndex;
9688  }
9689 
9690  // Found non-null allocation.
9691  if(nextAlloc2ndIndex != SIZE_MAX)
9692  {
9693  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9694 
9695  // 1. Process free space before this allocation.
9696  if(lastOffset < suballoc.offset)
9697  {
9698  // There is free space from lastOffset to suballoc.offset.
9699  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9700  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9701  }
9702 
9703  // 2. Process this allocation.
9704  // There is allocation with suballoc.offset, suballoc.size.
9705  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9706 
9707  // 3. Prepare for next iteration.
9708  lastOffset = suballoc.offset + suballoc.size;
9709  --nextAlloc2ndIndex;
9710  }
9711  // We are at the end.
9712  else
9713  {
9714  if(lastOffset < size)
9715  {
9716  // There is free space from lastOffset to size.
9717  const VkDeviceSize unusedRangeSize = size - lastOffset;
9718  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9719  }
9720 
9721  // End of loop.
9722  lastOffset = size;
9723  }
9724  }
9725  }
9726 
9727  PrintDetailedMap_End(json);
9728 }
9729 #endif // #if VMA_STATS_STRING_ENABLED
9730 
9731 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9732  uint32_t currentFrameIndex,
9733  uint32_t frameInUseCount,
9734  VkDeviceSize bufferImageGranularity,
9735  VkDeviceSize allocSize,
9736  VkDeviceSize allocAlignment,
9737  bool upperAddress,
9738  VmaSuballocationType allocType,
9739  bool canMakeOtherLost,
9740  uint32_t strategy,
9741  VmaAllocationRequest* pAllocationRequest)
9742 {
9743  VMA_ASSERT(allocSize > 0);
9744  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9745  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9746  VMA_HEAVY_ASSERT(Validate());
9747  return upperAddress ?
9748  CreateAllocationRequest_UpperAddress(
9749  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9750  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9751  CreateAllocationRequest_LowerAddress(
9752  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9753  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9754 }
9755 
9756 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9757  uint32_t currentFrameIndex,
9758  uint32_t frameInUseCount,
9759  VkDeviceSize bufferImageGranularity,
9760  VkDeviceSize allocSize,
9761  VkDeviceSize allocAlignment,
9762  VmaSuballocationType allocType,
9763  bool canMakeOtherLost,
9764  uint32_t strategy,
9765  VmaAllocationRequest* pAllocationRequest)
9766 {
9767  const VkDeviceSize size = GetSize();
9768  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9769  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9770 
9771  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9772  {
9773  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9774  return false;
9775  }
9776 
9777  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9778  if(allocSize > size)
9779  {
9780  return false;
9781  }
9782  VkDeviceSize resultBaseOffset = size - allocSize;
9783  if(!suballocations2nd.empty())
9784  {
9785  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9786  resultBaseOffset = lastSuballoc.offset - allocSize;
9787  if(allocSize > lastSuballoc.offset)
9788  {
9789  return false;
9790  }
9791  }
9792 
9793  // Start from offset equal to end of free space.
9794  VkDeviceSize resultOffset = resultBaseOffset;
9795 
9796  // Apply VMA_DEBUG_MARGIN at the end.
9797  if(VMA_DEBUG_MARGIN > 0)
9798  {
9799  if(resultOffset < VMA_DEBUG_MARGIN)
9800  {
9801  return false;
9802  }
9803  resultOffset -= VMA_DEBUG_MARGIN;
9804  }
9805 
9806  // Apply alignment.
9807  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9808 
9809  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9810  // Make bigger alignment if necessary.
9811  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9812  {
9813  bool bufferImageGranularityConflict = false;
9814  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9815  {
9816  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9817  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9818  {
9819  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9820  {
9821  bufferImageGranularityConflict = true;
9822  break;
9823  }
9824  }
9825  else
9826  // Already on previous page.
9827  break;
9828  }
9829  if(bufferImageGranularityConflict)
9830  {
9831  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9832  }
9833  }
9834 
9835  // There is enough free space.
9836  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9837  suballocations1st.back().offset + suballocations1st.back().size :
9838  0;
9839  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9840  {
9841  // Check previous suballocations for BufferImageGranularity conflicts.
9842  // If conflict exists, allocation cannot be made here.
9843  if(bufferImageGranularity > 1)
9844  {
9845  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9846  {
9847  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9848  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9849  {
9850  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9851  {
9852  return false;
9853  }
9854  }
9855  else
9856  {
9857  // Already on next page.
9858  break;
9859  }
9860  }
9861  }
9862 
9863  // All tests passed: Success.
9864  pAllocationRequest->offset = resultOffset;
9865  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9866  pAllocationRequest->sumItemSize = 0;
9867  // pAllocationRequest->item unused.
9868  pAllocationRequest->itemsToMakeLostCount = 0;
9869  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9870  return true;
9871  }
9872 
9873  return false;
9874 }
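// Editor's illustrative sketch (hypothetical names): allocating from the top
// of the block as above - subtract the size and margin from the current top,
// align the offset down, and succeed only if it still clears the lower stack.
#include <cstdint>

namespace vma_example_upper {

inline uint64_t AlignDown(uint64_t v, uint64_t a) { return v / a * a; }

// Returns true and writes outOffset when the upper allocation fits between
// lowerEnd (end of the 1st vector) and top (bottom of the 2nd vector).
inline bool AllocUpper(uint64_t top, uint64_t lowerEnd, uint64_t size,
    uint64_t alignment, uint64_t margin, uint64_t& outOffset)
{
    if(top < size + margin)
    {
        return false; // Not enough room below the top even before alignment.
    }
    const uint64_t offset = AlignDown(top - size - margin, alignment);
    if(offset < lowerEnd + margin)
    {
        return false; // Would collide with the lower stack plus its margin.
    }
    outOffset = offset;
    return true;
}

} // namespace vma_example_upper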
9875 
9876 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9877  uint32_t currentFrameIndex,
9878  uint32_t frameInUseCount,
9879  VkDeviceSize bufferImageGranularity,
9880  VkDeviceSize allocSize,
9881  VkDeviceSize allocAlignment,
9882  VmaSuballocationType allocType,
9883  bool canMakeOtherLost,
9884  uint32_t strategy,
9885  VmaAllocationRequest* pAllocationRequest)
9886 {
9887  const VkDeviceSize size = GetSize();
9888  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9889  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9890 
9891  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9892  {
9893  // Try to allocate at the end of 1st vector.
9894 
9895  VkDeviceSize resultBaseOffset = 0;
9896  if(!suballocations1st.empty())
9897  {
9898  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9899  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9900  }
9901 
9902  // Start from offset equal to beginning of free space.
9903  VkDeviceSize resultOffset = resultBaseOffset;
9904 
9905  // Apply VMA_DEBUG_MARGIN at the beginning.
9906  if(VMA_DEBUG_MARGIN > 0)
9907  {
9908  resultOffset += VMA_DEBUG_MARGIN;
9909  }
9910 
9911  // Apply alignment.
9912  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9913 
9914  // Check previous suballocations for BufferImageGranularity conflicts.
9915  // Make bigger alignment if necessary.
9916  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9917  {
9918  bool bufferImageGranularityConflict = false;
9919  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9920  {
9921  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9922  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9923  {
9924  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9925  {
9926  bufferImageGranularityConflict = true;
9927  break;
9928  }
9929  }
9930  else
9931  // Already on previous page.
9932  break;
9933  }
9934  if(bufferImageGranularityConflict)
9935  {
9936  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9937  }
9938  }
9939 
9940  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9941  suballocations2nd.back().offset : size;
9942 
9943  // There is enough free space at the end after alignment.
9944  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9945  {
9946  // Check next suballocations for BufferImageGranularity conflicts.
9947  // If conflict exists, allocation cannot be made here.
9948  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9949  {
9950  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9951  {
9952  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9953  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9954  {
9955  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9956  {
9957  return false;
9958  }
9959  }
9960  else
9961  {
9962  // Already on previous page.
9963  break;
9964  }
9965  }
9966  }
9967 
9968  // All tests passed: Success.
9969  pAllocationRequest->offset = resultOffset;
9970  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9971  pAllocationRequest->sumItemSize = 0;
9972  // pAllocationRequest->item, customData unused.
9973  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9974  pAllocationRequest->itemsToMakeLostCount = 0;
9975  return true;
9976  }
9977  }
9978 
9979  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9980  // beginning of 1st vector as the end of free space.
9981  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9982  {
9983  VMA_ASSERT(!suballocations1st.empty());
9984 
9985  VkDeviceSize resultBaseOffset = 0;
9986  if(!suballocations2nd.empty())
9987  {
9988  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9989  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9990  }
9991 
9992  // Start from offset equal to beginning of free space.
9993  VkDeviceSize resultOffset = resultBaseOffset;
9994 
9995  // Apply VMA_DEBUG_MARGIN at the beginning.
9996  if(VMA_DEBUG_MARGIN > 0)
9997  {
9998  resultOffset += VMA_DEBUG_MARGIN;
9999  }
10000 
10001  // Apply alignment.
10002  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10003 
10004  // Check previous suballocations for BufferImageGranularity conflicts.
10005  // Make bigger alignment if necessary.
10006  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10007  {
10008  bool bufferImageGranularityConflict = false;
10009  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10010  {
10011  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10012  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10013  {
10014  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10015  {
10016  bufferImageGranularityConflict = true;
10017  break;
10018  }
10019  }
10020  else
10021  // Already on previous page.
10022  break;
10023  }
10024  if(bufferImageGranularityConflict)
10025  {
10026  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10027  }
10028  }
10029 
10030  pAllocationRequest->itemsToMakeLostCount = 0;
10031  pAllocationRequest->sumItemSize = 0;
10032  size_t index1st = m_1stNullItemsBeginCount;
10033 
10034  if(canMakeOtherLost)
10035  {
10036  while(index1st < suballocations1st.size() &&
10037  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10038  {
10039  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10040  const VmaSuballocation& suballoc = suballocations1st[index1st];
10041  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10042  {
10043  // No problem.
10044  }
10045  else
10046  {
10047  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10048  if(suballoc.hAllocation->CanBecomeLost() &&
10049  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10050  {
10051  ++pAllocationRequest->itemsToMakeLostCount;
10052  pAllocationRequest->sumItemSize += suballoc.size;
10053  }
10054  else
10055  {
10056  return false;
10057  }
10058  }
10059  ++index1st;
10060  }
10061 
10062  // Check next suballocations for BufferImageGranularity conflicts.
10063  // If conflict exists, we must mark more allocations lost or fail.
10064  if(bufferImageGranularity > 1)
10065  {
10066  while(index1st < suballocations1st.size())
10067  {
10068  const VmaSuballocation& suballoc = suballocations1st[index1st];
10069  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10070  {
10071  if(suballoc.hAllocation != VK_NULL_HANDLE)
10072  {
10073  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10074  if(suballoc.hAllocation->CanBecomeLost() &&
10075  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10076  {
10077  ++pAllocationRequest->itemsToMakeLostCount;
10078  pAllocationRequest->sumItemSize += suballoc.size;
10079  }
10080  else
10081  {
10082  return false;
10083  }
10084  }
10085  }
10086  else
10087  {
10088  // Already on next page.
10089  break;
10090  }
10091  ++index1st;
10092  }
10093  }
10094 
10095  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10096  if(index1st == suballocations1st.size() &&
10097  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10098  {
10099  // TODO: Known limitation: this case is not implemented yet, so the allocation fails here.
10100  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10101  }
10102  }
10103 
10104  // There is enough free space at the end after alignment.
10105  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10106  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10107  {
10108  // Check next suballocations for BufferImageGranularity conflicts.
10109  // If conflict exists, allocation cannot be made here.
10110  if(bufferImageGranularity > 1)
10111  {
10112  for(size_t nextSuballocIndex = index1st;
10113  nextSuballocIndex < suballocations1st.size();
10114  nextSuballocIndex++)
10115  {
10116  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10117  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10118  {
10119  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10120  {
10121  return false;
10122  }
10123  }
10124  else
10125  {
10126  // Already on next page.
10127  break;
10128  }
10129  }
10130  }
10131 
10132  // All tests passed: Success.
10133  pAllocationRequest->offset = resultOffset;
10134  pAllocationRequest->sumFreeSize =
10135  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10136  - resultBaseOffset
10137  - pAllocationRequest->sumItemSize;
10138  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10139  // pAllocationRequest->item, customData unused.
10140  return true;
10141  }
10142  }
10143 
10144  return false;
10145 }
10146 
10147 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10148  uint32_t currentFrameIndex,
10149  uint32_t frameInUseCount,
10150  VmaAllocationRequest* pAllocationRequest)
10151 {
10152  if(pAllocationRequest->itemsToMakeLostCount == 0)
10153  {
10154  return true;
10155  }
10156 
10157  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10158 
10159  // We always start from 1st.
10160  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10161  size_t index = m_1stNullItemsBeginCount;
10162  size_t madeLostCount = 0;
10163  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10164  {
10165  if(index == suballocations->size())
10166  {
10167  index = 0;
10168  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10169  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10170  {
10171  suballocations = &AccessSuballocations2nd();
10172  }
10173  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10174  // suballocations continues pointing at AccessSuballocations1st().
10175  VMA_ASSERT(!suballocations->empty());
10176  }
10177  VmaSuballocation& suballoc = (*suballocations)[index];
10178  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10179  {
10180  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10181  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10182  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10183  {
10184  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10185  suballoc.hAllocation = VK_NULL_HANDLE;
10186  m_SumFreeSize += suballoc.size;
10187  if(suballocations == &AccessSuballocations1st())
10188  {
10189  ++m_1stNullItemsMiddleCount;
10190  }
10191  else
10192  {
10193  ++m_2ndNullItemsCount;
10194  }
10195  ++madeLostCount;
10196  }
10197  else
10198  {
10199  return false;
10200  }
10201  }
10202  ++index;
10203  }
10204 
10205  CleanupAfterFree();
10206  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10207 
10208  return true;
10209 }
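// Illustrative call sequence: the owning block vector first calls
// CreateAllocationRequest(); if that succeeds with itemsToMakeLostCount > 0,
// MakeRequestedAllocationsLost() above evicts exactly those items, and only
// then is Alloc() called to commit the new suballocation.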
10210 
10211 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10212 {
10213  uint32_t lostAllocationCount = 0;
10214 
10215  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10216  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10217  {
10218  VmaSuballocation& suballoc = suballocations1st[i];
10219  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10220  suballoc.hAllocation->CanBecomeLost() &&
10221  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10222  {
10223  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10224  suballoc.hAllocation = VK_NULL_HANDLE;
10225  ++m_1stNullItemsMiddleCount;
10226  m_SumFreeSize += suballoc.size;
10227  ++lostAllocationCount;
10228  }
10229  }
10230 
10231  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10232  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10233  {
10234  VmaSuballocation& suballoc = suballocations2nd[i];
10235  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10236  suballoc.hAllocation->CanBecomeLost() &&
10237  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10238  {
10239  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10240  suballoc.hAllocation = VK_NULL_HANDLE;
10241  ++m_2ndNullItemsCount;
10242  m_SumFreeSize += suballoc.size;
10243  ++lostAllocationCount;
10244  }
10245  }
10246 
10247  if(lostAllocationCount)
10248  {
10249  CleanupAfterFree();
10250  }
10251 
10252  return lostAllocationCount;
10253 }
10254 
10255 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10256 {
10257  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10258  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10259  {
10260  const VmaSuballocation& suballoc = suballocations1st[i];
10261  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10262  {
10263  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10264  {
10265  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10266  return VK_ERROR_VALIDATION_FAILED_EXT;
10267  }
10268  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10269  {
10270  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10271  return VK_ERROR_VALIDATION_FAILED_EXT;
10272  }
10273  }
10274  }
10275 
10276  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10277  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10278  {
10279  const VmaSuballocation& suballoc = suballocations2nd[i];
10280  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10281  {
10282  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10283  {
10284  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10285  return VK_ERROR_VALIDATION_FAILED_EXT;
10286  }
10287  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10288  {
10289  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10290  return VK_ERROR_VALIDATION_FAILED_EXT;
10291  }
10292  }
10293  }
10294 
10295  return VK_SUCCESS;
10296 }
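// Note: the magic values checked above are written by
// VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation() (see below); they
// exist only when VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION is enabled.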
10297 
10298 void VmaBlockMetadata_Linear::Alloc(
10299  const VmaAllocationRequest& request,
10300  VmaSuballocationType type,
10301  VkDeviceSize allocSize,
10302  VmaAllocation hAllocation)
10303 {
10304  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10305 
10306  switch(request.type)
10307  {
10308  case VmaAllocationRequestType::UpperAddress:
10309  {
10310  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10311  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10312  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10313  suballocations2nd.push_back(newSuballoc);
10314  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10315  }
10316  break;
10317  case VmaAllocationRequestType::EndOf1st:
10318  {
10319  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10320 
10321  VMA_ASSERT(suballocations1st.empty() ||
10322  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10323  // Check if it fits before the end of the block.
10324  VMA_ASSERT(request.offset + allocSize <= GetSize());
10325 
10326  suballocations1st.push_back(newSuballoc);
10327  }
10328  break;
10329  case VmaAllocationRequestType::EndOf2nd:
10330  {
10331  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10332  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10333  VMA_ASSERT(!suballocations1st.empty() &&
10334  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10335  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10336 
10337  switch(m_2ndVectorMode)
10338  {
10339  case SECOND_VECTOR_EMPTY:
10340  // First allocation from second part ring buffer.
10341  VMA_ASSERT(suballocations2nd.empty());
10342  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10343  break;
10344  case SECOND_VECTOR_RING_BUFFER:
10345  // 2-part ring buffer is already started.
10346  VMA_ASSERT(!suballocations2nd.empty());
10347  break;
10348  case SECOND_VECTOR_DOUBLE_STACK:
10349  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10350  break;
10351  default:
10352  VMA_ASSERT(0);
10353  }
10354 
10355  suballocations2nd.push_back(newSuballoc);
10356  }
10357  break;
10358  default:
10359  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10360  }
10361 
10362  m_SumFreeSize -= newSuballoc.size;
10363 }
10364 
10365 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10366 {
10367  FreeAtOffset(allocation->GetOffset());
10368 }
10369 
10370 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10371 {
10372  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10373  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10374 
10375  if(!suballocations1st.empty())
10376  {
10377  // Check if it is the first allocation in 1st vector; if so, mark it as the next empty item at the beginning.
10378  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10379  if(firstSuballoc.offset == offset)
10380  {
10381  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10382  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10383  m_SumFreeSize += firstSuballoc.size;
10384  ++m_1stNullItemsBeginCount;
10385  CleanupAfterFree();
10386  return;
10387  }
10388  }
10389 
10390  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10391  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10392  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10393  {
10394  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10395  if(lastSuballoc.offset == offset)
10396  {
10397  m_SumFreeSize += lastSuballoc.size;
10398  suballocations2nd.pop_back();
10399  CleanupAfterFree();
10400  return;
10401  }
10402  }
10403  // Last allocation in 1st vector.
10404  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10405  {
10406  VmaSuballocation& lastSuballoc = suballocations1st.back();
10407  if(lastSuballoc.offset == offset)
10408  {
10409  m_SumFreeSize += lastSuballoc.size;
10410  suballocations1st.pop_back();
10411  CleanupAfterFree();
10412  return;
10413  }
10414  }
10415 
10416  // Item from the middle of 1st vector.
10417  {
10418  VmaSuballocation refSuballoc;
10419  refSuballoc.offset = offset;
10420  // Rest of members stays uninitialized intentionally for better performance.
10421  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10422  suballocations1st.begin() + m_1stNullItemsBeginCount,
10423  suballocations1st.end(),
10424  refSuballoc);
10425  if(it != suballocations1st.end())
10426  {
10427  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10428  it->hAllocation = VK_NULL_HANDLE;
10429  ++m_1stNullItemsMiddleCount;
10430  m_SumFreeSize += it->size;
10431  CleanupAfterFree();
10432  return;
10433  }
10434  }
10435 
10436  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10437  {
10438  // Item from the middle of 2nd vector.
10439  VmaSuballocation refSuballoc;
10440  refSuballoc.offset = offset;
10441  // Rest of members stays uninitialized intentionally for better performance.
10442  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10443  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10444  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10445  if(it != suballocations2nd.end())
10446  {
10447  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10448  it->hAllocation = VK_NULL_HANDLE;
10449  ++m_2ndNullItemsCount;
10450  m_SumFreeSize += it->size;
10451  CleanupAfterFree();
10452  return;
10453  }
10454  }
10455 
10456  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10457 }
10458 
10459 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10460 {
10461  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10462  const size_t suballocCount = AccessSuballocations1st().size();
10463  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10464 }
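// The heuristic above compacts only when null items outnumber live items by at
// least 3:2 and the vector holds more than 32 items. E.g. 25 null vs. 15 live
// items triggers compaction, because 25*2 = 50 >= 15*3 = 45.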
10465 
10466 void VmaBlockMetadata_Linear::CleanupAfterFree()
10467 {
10468  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10469  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10470 
10471  if(IsEmpty())
10472  {
10473  suballocations1st.clear();
10474  suballocations2nd.clear();
10475  m_1stNullItemsBeginCount = 0;
10476  m_1stNullItemsMiddleCount = 0;
10477  m_2ndNullItemsCount = 0;
10478  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10479  }
10480  else
10481  {
10482  const size_t suballoc1stCount = suballocations1st.size();
10483  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10484  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10485 
10486  // Find more null items at the beginning of 1st vector.
10487  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10488  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10489  {
10490  ++m_1stNullItemsBeginCount;
10491  --m_1stNullItemsMiddleCount;
10492  }
10493 
10494  // Find more null items at the end of 1st vector.
10495  while(m_1stNullItemsMiddleCount > 0 &&
10496  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10497  {
10498  --m_1stNullItemsMiddleCount;
10499  suballocations1st.pop_back();
10500  }
10501 
10502  // Find more null items at the end of 2nd vector.
10503  while(m_2ndNullItemsCount > 0 &&
10504  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10505  {
10506  --m_2ndNullItemsCount;
10507  suballocations2nd.pop_back();
10508  }
10509 
10510  // Find more null items at the beginning of 2nd vector.
10511  while(m_2ndNullItemsCount > 0 &&
10512  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10513  {
10514  --m_2ndNullItemsCount;
10515  suballocations2nd.remove(0);
10516  }
10517 
10518  if(ShouldCompact1st())
10519  {
10520  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10521  size_t srcIndex = m_1stNullItemsBeginCount;
10522  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10523  {
10524  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10525  {
10526  ++srcIndex;
10527  }
10528  if(dstIndex != srcIndex)
10529  {
10530  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10531  }
10532  ++srcIndex;
10533  }
10534  suballocations1st.resize(nonNullItemCount);
10535  m_1stNullItemsBeginCount = 0;
10536  m_1stNullItemsMiddleCount = 0;
10537  }
10538 
10539  // 2nd vector became empty.
10540  if(suballocations2nd.empty())
10541  {
10542  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10543  }
10544 
10545  // 1st vector became empty.
10546  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10547  {
10548  suballocations1st.clear();
10549  m_1stNullItemsBeginCount = 0;
10550 
10551  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10552  {
10553  // Swap 1st with 2nd. Now 2nd is empty.
10554  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10555  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10556  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10557  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10558  {
10559  ++m_1stNullItemsBeginCount;
10560  --m_1stNullItemsMiddleCount;
10561  }
10562  m_2ndNullItemsCount = 0;
10563  m_1stVectorIndex ^= 1;
10564  }
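// Note: this "swap" of 1st and 2nd vector is O(1) - it only flips
// m_1stVectorIndex; no elements are copied.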
10565  }
10566  }
10567 
10568  VMA_HEAVY_ASSERT(Validate());
10569 }
10570 
10571 
10572 ////////////////////////////////////////////////////////////////////////////////
10573 // class VmaBlockMetadata_Buddy
10574 
10575 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10576  VmaBlockMetadata(hAllocator),
10577  m_Root(VMA_NULL),
10578  m_AllocationCount(0),
10579  m_FreeCount(1),
10580  m_SumFreeSize(0)
10581 {
10582  memset(m_FreeList, 0, sizeof(m_FreeList));
10583 }
10584 
10585 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10586 {
10587  DeleteNode(m_Root);
10588 }
10589 
10590 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10591 {
10592  VmaBlockMetadata::Init(size);
10593 
10594  m_UsableSize = VmaPrevPow2(size);
10595  m_SumFreeSize = m_UsableSize;
10596 
10597  // Calculate m_LevelCount.
10598  m_LevelCount = 1;
10599  while(m_LevelCount < MAX_LEVELS &&
10600  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10601  {
10602  ++m_LevelCount;
10603  }
10604 
10605  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10606  rootNode->offset = 0;
10607  rootNode->type = Node::TYPE_FREE;
10608  rootNode->parent = VMA_NULL;
10609  rootNode->buddy = VMA_NULL;
10610 
10611  m_Root = rootNode;
10612  AddToFreeListFront(0, rootNode);
10613 }
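// Illustrative example: for a 10 MiB block, m_UsableSize becomes 8 MiB (the
// previous power of 2); the remaining 2 MiB is reported as unusable (see the
// GetUnusableSize() callers below) and is never handed out to allocations.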
10614 
10615 bool VmaBlockMetadata_Buddy::Validate() const
10616 {
10617  // Validate tree.
10618  ValidationContext ctx;
10619  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10620  {
10621  VMA_VALIDATE(false && "ValidateNode failed.");
10622  }
10623  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10624  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10625 
10626  // Validate free node lists.
10627  for(uint32_t level = 0; level < m_LevelCount; ++level)
10628  {
10629  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10630  m_FreeList[level].front->free.prev == VMA_NULL);
10631 
10632  for(Node* node = m_FreeList[level].front;
10633  node != VMA_NULL;
10634  node = node->free.next)
10635  {
10636  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10637 
10638  if(node->free.next == VMA_NULL)
10639  {
10640  VMA_VALIDATE(m_FreeList[level].back == node);
10641  }
10642  else
10643  {
10644  VMA_VALIDATE(node->free.next->free.prev == node);
10645  }
10646  }
10647  }
10648 
10649  // Validate that free lists at higher levels are empty.
10650  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10651  {
10652  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10653  }
10654 
10655  return true;
10656 }
10657 
10658 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10659 {
10660  for(uint32_t level = 0; level < m_LevelCount; ++level)
10661  {
10662  if(m_FreeList[level].front != VMA_NULL)
10663  {
10664  return LevelToNodeSize(level);
10665  }
10666  }
10667  return 0;
10668 }
10669 
10670 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10671 {
10672  const VkDeviceSize unusableSize = GetUnusableSize();
10673 
10674  outInfo.blockCount = 1;
10675 
10676  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10677  outInfo.usedBytes = outInfo.unusedBytes = 0;
10678 
10679  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10680  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10681  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10682 
10683  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10684 
10685  if(unusableSize > 0)
10686  {
10687  ++outInfo.unusedRangeCount;
10688  outInfo.unusedBytes += unusableSize;
10689  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10690  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10691  }
10692 }
10693 
10694 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10695 {
10696  const VkDeviceSize unusableSize = GetUnusableSize();
10697 
10698  inoutStats.size += GetSize();
10699  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10700  inoutStats.allocationCount += m_AllocationCount;
10701  inoutStats.unusedRangeCount += m_FreeCount;
10702  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10703 
10704  if(unusableSize > 0)
10705  {
10706  ++inoutStats.unusedRangeCount;
10707  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10708  }
10709 }
10710 
10711 #if VMA_STATS_STRING_ENABLED
10712 
10713 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10714 {
10715  // TODO optimize
10716  VmaStatInfo stat;
10717  CalcAllocationStatInfo(stat);
10718 
10719  PrintDetailedMap_Begin(
10720  json,
10721  stat.unusedBytes,
10722  stat.allocationCount,
10723  stat.unusedRangeCount);
10724 
10725  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10726 
10727  const VkDeviceSize unusableSize = GetUnusableSize();
10728  if(unusableSize > 0)
10729  {
10730  PrintDetailedMap_UnusedRange(json,
10731  m_UsableSize, // offset
10732  unusableSize); // size
10733  }
10734 
10735  PrintDetailedMap_End(json);
10736 }
10737 
10738 #endif // #if VMA_STATS_STRING_ENABLED
10739 
10740 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10741  uint32_t currentFrameIndex,
10742  uint32_t frameInUseCount,
10743  VkDeviceSize bufferImageGranularity,
10744  VkDeviceSize allocSize,
10745  VkDeviceSize allocAlignment,
10746  bool upperAddress,
10747  VmaSuballocationType allocType,
10748  bool canMakeOtherLost,
10749  uint32_t strategy,
10750  VmaAllocationRequest* pAllocationRequest)
10751 {
10752  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10753 
10754  // Simple way to respect bufferImageGranularity. May be optimized some day:
10755  // whenever the allocation might hold an OPTIMAL image, inflate both alignment and size to the granularity.
10756  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10757  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10758  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10759  {
10760  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10761  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10762  }
10763 
10764  if(allocSize > m_UsableSize)
10765  {
10766  return false;
10767  }
10768 
10769  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10770  for(uint32_t level = targetLevel + 1; level--; )
10771  {
10772  for(Node* freeNode = m_FreeList[level].front;
10773  freeNode != VMA_NULL;
10774  freeNode = freeNode->free.next)
10775  {
10776  if(freeNode->offset % allocAlignment == 0)
10777  {
10778  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10779  pAllocationRequest->offset = freeNode->offset;
10780  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10781  pAllocationRequest->sumItemSize = 0;
10782  pAllocationRequest->itemsToMakeLostCount = 0;
10783  pAllocationRequest->customData = (void*)(uintptr_t)level;
10784  return true;
10785  }
10786  }
10787  }
10788 
10789  return false;
10790 }
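// Note: level 0 is the root (largest node), so the loop above starts at
// targetLevel (the smallest adequate node size) and walks toward the root,
// returning the first free node whose offset satisfies allocAlignment.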
10791 
10792 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10793  uint32_t currentFrameIndex,
10794  uint32_t frameInUseCount,
10795  VmaAllocationRequest* pAllocationRequest)
10796 {
10797  /*
10798  Lost allocations are not supported in buddy allocator at the moment.
10799  Support might be added in the future.
10800  */
10801  return pAllocationRequest->itemsToMakeLostCount == 0;
10802 }
10803 
10804 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10805 {
10806  /*
10807  Lost allocations are not supported in buddy allocator at the moment.
10808  Support might be added in the future.
10809  */
10810  return 0;
10811 }
10812 
10813 void VmaBlockMetadata_Buddy::Alloc(
10814  const VmaAllocationRequest& request,
10815  VmaSuballocationType type,
10816  VkDeviceSize allocSize,
10817  VmaAllocation hAllocation)
10818 {
10819  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10820 
10821  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10822  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10823 
10824  Node* currNode = m_FreeList[currLevel].front;
10825  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10826  while(currNode->offset != request.offset)
10827  {
10828  currNode = currNode->free.next;
10829  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10830  }
10831 
10832  // Go down, splitting free nodes.
10833  while(currLevel < targetLevel)
10834  {
10835  // currNode is already first free node at currLevel.
10836  // Remove it from list of free nodes at this currLevel.
10837  RemoveFromFreeList(currLevel, currNode);
10838 
10839  const uint32_t childrenLevel = currLevel + 1;
10840 
10841  // Create two free sub-nodes.
10842  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10843  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10844 
10845  leftChild->offset = currNode->offset;
10846  leftChild->type = Node::TYPE_FREE;
10847  leftChild->parent = currNode;
10848  leftChild->buddy = rightChild;
10849 
10850  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10851  rightChild->type = Node::TYPE_FREE;
10852  rightChild->parent = currNode;
10853  rightChild->buddy = leftChild;
10854 
10855  // Convert current currNode to split type.
10856  currNode->type = Node::TYPE_SPLIT;
10857  currNode->split.leftChild = leftChild;
10858 
10859  // Add child nodes to free list. Order is important!
10860  AddToFreeListFront(childrenLevel, rightChild);
10861  AddToFreeListFront(childrenLevel, leftChild);
10862 
10863  ++m_FreeCount;
10864  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10865  ++currLevel;
10866  currNode = m_FreeList[currLevel].front;
10867 
10868  /*
10869  We can be sure that currNode, as left child of node previously split,
10870  also fulfills the alignment requirement.
10871  */
10872  }
10873 
10874  // Remove from free list.
10875  VMA_ASSERT(currLevel == targetLevel &&
10876  currNode != VMA_NULL &&
10877  currNode->type == Node::TYPE_FREE);
10878  RemoveFromFreeList(currLevel, currNode);
10879 
10880  // Convert to allocation node.
10881  currNode->type = Node::TYPE_ALLOCATION;
10882  currNode->allocation.alloc = hAllocation;
10883 
10884  ++m_AllocationCount;
10885  --m_FreeCount;
10886  m_SumFreeSize -= allocSize;
10887 }
10888 
10889 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10890 {
10891  if(node->type == Node::TYPE_SPLIT)
10892  {
10893  DeleteNode(node->split.leftChild->buddy);
10894  DeleteNode(node->split.leftChild);
10895  }
10896 
10897  vma_delete(GetAllocationCallbacks(), node);
10898 }
10899 
10900 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10901 {
10902  VMA_VALIDATE(level < m_LevelCount);
10903  VMA_VALIDATE(curr->parent == parent);
10904  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10905  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10906  switch(curr->type)
10907  {
10908  case Node::TYPE_FREE:
10909  // curr->free.prev, next are validated separately.
10910  ctx.calculatedSumFreeSize += levelNodeSize;
10911  ++ctx.calculatedFreeCount;
10912  break;
10913  case Node::TYPE_ALLOCATION:
10914  ++ctx.calculatedAllocationCount;
10915  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10916  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10917  break;
10918  case Node::TYPE_SPLIT:
10919  {
10920  const uint32_t childrenLevel = level + 1;
10921  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10922  const Node* const leftChild = curr->split.leftChild;
10923  VMA_VALIDATE(leftChild != VMA_NULL);
10924  VMA_VALIDATE(leftChild->offset == curr->offset);
10925  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10926  {
10927  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10928  }
10929  const Node* const rightChild = leftChild->buddy;
10930  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10931  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10932  {
10933  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10934  }
10935  }
10936  break;
10937  default:
10938  return false;
10939  }
10940 
10941  return true;
10942 }
10943 
10944 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10945 {
10946  // This could be optimized, e.g. by using std::bit_width (formerly std::log2p1) from C++20.
10947  uint32_t level = 0;
10948  VkDeviceSize currLevelNodeSize = m_UsableSize;
10949  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10950  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10951  {
10952  ++level;
10953  currLevelNodeSize = nextLevelNodeSize;
10954  nextLevelNodeSize = currLevelNodeSize >> 1;
10955  }
10956  return level;
10957 }
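// Illustrative example: with m_UsableSize = 8 MiB, allocSize = 700 KiB yields
// level 3, the smallest node still fitting it: 8 MiB -> 4 MiB -> 2 MiB -> 1 MiB.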
10958 
10959 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10960 {
10961  // Find node and level.
10962  Node* node = m_Root;
10963  VkDeviceSize nodeOffset = 0;
10964  uint32_t level = 0;
10965  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10966  while(node->type == Node::TYPE_SPLIT)
10967  {
10968  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10969  if(offset < nodeOffset + nextLevelSize)
10970  {
10971  node = node->split.leftChild;
10972  }
10973  else
10974  {
10975  node = node->split.leftChild->buddy;
10976  nodeOffset += nextLevelSize;
10977  }
10978  ++level;
10979  levelNodeSize = nextLevelSize;
10980  }
10981 
10982  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10983  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10984 
10985  ++m_FreeCount;
10986  --m_AllocationCount;
10987  m_SumFreeSize += node->allocation.alloc->GetSize(); // alloc may legally be VK_NULL_HANDLE here (see assert above), so use the node's own handle.
10988 
10989  node->type = Node::TYPE_FREE;
10990 
10991  // Join free nodes if possible.
10992  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10993  {
10994  RemoveFromFreeList(level, node->buddy);
10995  Node* const parent = node->parent;
10996 
10997  vma_delete(GetAllocationCallbacks(), node->buddy);
10998  vma_delete(GetAllocationCallbacks(), node);
10999  parent->type = Node::TYPE_FREE;
11000 
11001  node = parent;
11002  --level;
11003  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11004  --m_FreeCount;
11005  }
11006 
11007  AddToFreeListFront(level, node);
11008 }
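// The loop above is the classic buddy merge: as long as the freed node's buddy
// is also free, both children are deleted and their parent becomes a single
// free node one level closer to the root.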
11009 
11010 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11011 {
11012  switch(node->type)
11013  {
11014  case Node::TYPE_FREE:
11015  ++outInfo.unusedRangeCount;
11016  outInfo.unusedBytes += levelNodeSize;
11017  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11018  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11019  break;
11020  case Node::TYPE_ALLOCATION:
11021  {
11022  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11023  ++outInfo.allocationCount;
11024  outInfo.usedBytes += allocSize;
11025  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11026  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11027 
11028  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11029  if(unusedRangeSize > 0)
11030  {
11031  ++outInfo.unusedRangeCount;
11032  outInfo.unusedBytes += unusedRangeSize;
11033  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11034  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11035  }
11036  }
11037  break;
11038  case Node::TYPE_SPLIT:
11039  {
11040  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11041  const Node* const leftChild = node->split.leftChild;
11042  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11043  const Node* const rightChild = leftChild->buddy;
11044  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11045  }
11046  break;
11047  default:
11048  VMA_ASSERT(0);
11049  }
11050 }
11051 
11052 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11053 {
11054  VMA_ASSERT(node->type == Node::TYPE_FREE);
11055 
11056  // List is empty.
11057  Node* const frontNode = m_FreeList[level].front;
11058  if(frontNode == VMA_NULL)
11059  {
11060  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11061  node->free.prev = node->free.next = VMA_NULL;
11062  m_FreeList[level].front = m_FreeList[level].back = node;
11063  }
11064  else
11065  {
11066  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11067  node->free.prev = VMA_NULL;
11068  node->free.next = frontNode;
11069  frontNode->free.prev = node;
11070  m_FreeList[level].front = node;
11071  }
11072 }
11073 
11074 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11075 {
11076  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11077 
11078  // It is at the front.
11079  if(node->free.prev == VMA_NULL)
11080  {
11081  VMA_ASSERT(m_FreeList[level].front == node);
11082  m_FreeList[level].front = node->free.next;
11083  }
11084  else
11085  {
11086  Node* const prevFreeNode = node->free.prev;
11087  VMA_ASSERT(prevFreeNode->free.next == node);
11088  prevFreeNode->free.next = node->free.next;
11089  }
11090 
11091  // It is at the back.
11092  if(node->free.next == VMA_NULL)
11093  {
11094  VMA_ASSERT(m_FreeList[level].back == node);
11095  m_FreeList[level].back = node->free.prev;
11096  }
11097  else
11098  {
11099  Node* const nextFreeNode = node->free.next;
11100  VMA_ASSERT(nextFreeNode->free.prev == node);
11101  nextFreeNode->free.prev = node->free.prev;
11102  }
11103 }
11104 
11105 #if VMA_STATS_STRING_ENABLED
11106 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11107 {
11108  switch(node->type)
11109  {
11110  case Node::TYPE_FREE:
11111  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11112  break;
11113  case Node::TYPE_ALLOCATION:
11114  {
11115  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11116  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11117  if(allocSize < levelNodeSize)
11118  {
11119  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11120  }
11121  }
11122  break;
11123  case Node::TYPE_SPLIT:
11124  {
11125  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11126  const Node* const leftChild = node->split.leftChild;
11127  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11128  const Node* const rightChild = leftChild->buddy;
11129  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11130  }
11131  break;
11132  default:
11133  VMA_ASSERT(0);
11134  }
11135 }
11136 #endif // #if VMA_STATS_STRING_ENABLED
11137 
11138 
11139 ////////////////////////////////////////////////////////////////////////////////
11140 // class VmaDeviceMemoryBlock
11141 
11142 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11143  m_pMetadata(VMA_NULL),
11144  m_MemoryTypeIndex(UINT32_MAX),
11145  m_Id(0),
11146  m_hMemory(VK_NULL_HANDLE),
11147  m_MapCount(0),
11148  m_pMappedData(VMA_NULL)
11149 {
11150 }
11151 
11152 void VmaDeviceMemoryBlock::Init(
11153  VmaAllocator hAllocator,
11154  VmaPool hParentPool,
11155  uint32_t newMemoryTypeIndex,
11156  VkDeviceMemory newMemory,
11157  VkDeviceSize newSize,
11158  uint32_t id,
11159  uint32_t algorithm)
11160 {
11161  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11162 
11163  m_hParentPool = hParentPool;
11164  m_MemoryTypeIndex = newMemoryTypeIndex;
11165  m_Id = id;
11166  m_hMemory = newMemory;
11167 
11168  switch(algorithm)
11169  {
11170  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11171  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11172  break;
11173  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11174  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11175  break;
11176  default:
11177  VMA_ASSERT(0);
11178  // Fall-through.
11179  case 0:
11180  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11181  }
11182  m_pMetadata->Init(newSize);
11183 }
11184 
11185 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11186 {
11187  // This is the most important assert in the entire library.
11188  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11189  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11190 
11191  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11192  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11193  m_hMemory = VK_NULL_HANDLE;
11194 
11195  vma_delete(allocator, m_pMetadata);
11196  m_pMetadata = VMA_NULL;
11197 }
11198 
11199 bool VmaDeviceMemoryBlock::Validate() const
11200 {
11201  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11202  (m_pMetadata->GetSize() != 0));
11203 
11204  return m_pMetadata->Validate();
11205 }
11206 
11207 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11208 {
11209  void* pData = nullptr;
11210  VkResult res = Map(hAllocator, 1, &pData);
11211  if(res != VK_SUCCESS)
11212  {
11213  return res;
11214  }
11215 
11216  res = m_pMetadata->CheckCorruption(pData);
11217 
11218  Unmap(hAllocator, 1);
11219 
11220  return res;
11221 }
11222 
11223 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11224 {
11225  if(count == 0)
11226  {
11227  return VK_SUCCESS;
11228  }
11229 
11230  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11231  if(m_MapCount != 0)
11232  {
11233  m_MapCount += count;
11234  VMA_ASSERT(m_pMappedData != VMA_NULL);
11235  if(ppData != VMA_NULL)
11236  {
11237  *ppData = m_pMappedData;
11238  }
11239  return VK_SUCCESS;
11240  }
11241  else
11242  {
11243  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11244  hAllocator->m_hDevice,
11245  m_hMemory,
11246  0, // offset
11247  VK_WHOLE_SIZE,
11248  0, // flags
11249  &m_pMappedData);
11250  if(result == VK_SUCCESS)
11251  {
11252  if(ppData != VMA_NULL)
11253  {
11254  *ppData = m_pMappedData;
11255  }
11256  m_MapCount = count;
11257  }
11258  return result;
11259  }
11260 }
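// Note: mapping is reference-counted per block. Only the first Map() calls
// vkMapMemory(); subsequent calls just increase m_MapCount, so overlapping
// Map()/Unmap() pairs coming from multiple allocations are safe.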
11261 
11262 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11263 {
11264  if(count == 0)
11265  {
11266  return;
11267  }
11268 
11269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11270  if(m_MapCount >= count)
11271  {
11272  m_MapCount -= count;
11273  if(m_MapCount == 0)
11274  {
11275  m_pMappedData = VMA_NULL;
11276  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11277  }
11278  }
11279  else
11280  {
11281  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11282  }
11283 }
11284 
11285 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11286 {
11287  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11288  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11289 
11290  void* pData;
11291  VkResult res = Map(hAllocator, 1, &pData);
11292  if(res != VK_SUCCESS)
11293  {
11294  return res;
11295  }
11296 
11297  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11298  VmaWriteMagicValue(pData, allocOffset + allocSize);
11299 
11300  Unmap(hAllocator, 1);
11301 
11302  return VK_SUCCESS;
11303 }
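// Resulting layout inside the block, with M = VMA_DEBUG_MARGIN (illustrative):
//   ... | magic @ allocOffset - M | allocation data | magic @ allocOffset + allocSize | ...
// ValidateMagicValueAroundAllocation() below re-checks both values when the allocation is freed.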
11304 
11305 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11306 {
11307  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11308  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11309 
11310  void* pData;
11311  VkResult res = Map(hAllocator, 1, &pData);
11312  if(res != VK_SUCCESS)
11313  {
11314  return res;
11315  }
11316 
11317  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11318  {
11319  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11320  }
11321  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11322  {
11323  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11324  }
11325 
11326  Unmap(hAllocator, 1);
11327 
11328  return VK_SUCCESS;
11329 }
11330 
11331 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11332  const VmaAllocator hAllocator,
11333  const VmaAllocation hAllocation,
11334  VkBuffer hBuffer)
11335 {
11336  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11337  hAllocation->GetBlock() == this);
11338  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11339  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11340  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11341  hAllocator->m_hDevice,
11342  hBuffer,
11343  m_hMemory,
11344  hAllocation->GetOffset());
11345 }
11346 
11347 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11348  const VmaAllocator hAllocator,
11349  const VmaAllocation hAllocation,
11350  VkImage hImage)
11351 {
11352  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11353  hAllocation->GetBlock() == this);
11354  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11355  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11356  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11357  hAllocator->m_hDevice,
11358  hImage,
11359  m_hMemory,
11360  hAllocation->GetOffset());
11361 }
11362 
11363 static void InitStatInfo(VmaStatInfo& outInfo)
11364 {
11365  memset(&outInfo, 0, sizeof(outInfo));
11366  outInfo.allocationSizeMin = UINT64_MAX;
11367  outInfo.unusedRangeSizeMin = UINT64_MAX;
11368 }
11369 
11370 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11371 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11372 {
11373  inoutInfo.blockCount += srcInfo.blockCount;
11374  inoutInfo.allocationCount += srcInfo.allocationCount;
11375  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11376  inoutInfo.usedBytes += srcInfo.usedBytes;
11377  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11378  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11379  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11380  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11381  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11382 }
11383 
11384 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11385 {
11386  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11387  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11388  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11389  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11390 }
11391 
11392 VmaPool_T::VmaPool_T(
11393  VmaAllocator hAllocator,
11394  const VmaPoolCreateInfo& createInfo,
11395  VkDeviceSize preferredBlockSize) :
11396  m_BlockVector(
11397  hAllocator,
11398  this, // hParentPool
11399  createInfo.memoryTypeIndex,
11400  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11401  createInfo.minBlockCount,
11402  createInfo.maxBlockCount,
11403  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11404  createInfo.frameInUseCount,
11405  true, // isCustomPool
11406  createInfo.blockSize != 0, // explicitBlockSize
11407  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11408  m_Id(0)
11409 {
11410 }
11411 
11412 VmaPool_T::~VmaPool_T()
11413 {
11414 }
11415 
11416 #if VMA_STATS_STRING_ENABLED
11417 
11418 #endif // #if VMA_STATS_STRING_ENABLED
11419 
11420 VmaBlockVector::VmaBlockVector(
11421  VmaAllocator hAllocator,
11422  VmaPool hParentPool,
11423  uint32_t memoryTypeIndex,
11424  VkDeviceSize preferredBlockSize,
11425  size_t minBlockCount,
11426  size_t maxBlockCount,
11427  VkDeviceSize bufferImageGranularity,
11428  uint32_t frameInUseCount,
11429  bool isCustomPool,
11430  bool explicitBlockSize,
11431  uint32_t algorithm) :
11432  m_hAllocator(hAllocator),
11433  m_hParentPool(hParentPool),
11434  m_MemoryTypeIndex(memoryTypeIndex),
11435  m_PreferredBlockSize(preferredBlockSize),
11436  m_MinBlockCount(minBlockCount),
11437  m_MaxBlockCount(maxBlockCount),
11438  m_BufferImageGranularity(bufferImageGranularity),
11439  m_FrameInUseCount(frameInUseCount),
11440  m_IsCustomPool(isCustomPool),
11441  m_ExplicitBlockSize(explicitBlockSize),
11442  m_Algorithm(algorithm),
11443  m_HasEmptyBlock(false),
11444  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11445  m_NextBlockId(0)
11446 {
11447 }
11448 
11449 VmaBlockVector::~VmaBlockVector()
11450 {
11451  for(size_t i = m_Blocks.size(); i--; )
11452  {
11453  m_Blocks[i]->Destroy(m_hAllocator);
11454  vma_delete(m_hAllocator, m_Blocks[i]);
11455  }
11456 }
11457 
11458 VkResult VmaBlockVector::CreateMinBlocks()
11459 {
11460  for(size_t i = 0; i < m_MinBlockCount; ++i)
11461  {
11462  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11463  if(res != VK_SUCCESS)
11464  {
11465  return res;
11466  }
11467  }
11468  return VK_SUCCESS;
11469 }
11470 
11471 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11472 {
11473  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11474 
11475  const size_t blockCount = m_Blocks.size();
11476 
11477  pStats->size = 0;
11478  pStats->unusedSize = 0;
11479  pStats->allocationCount = 0;
11480  pStats->unusedRangeCount = 0;
11481  pStats->unusedRangeSizeMax = 0;
11482  pStats->blockCount = blockCount;
11483 
11484  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11485  {
11486  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11487  VMA_ASSERT(pBlock);
11488  VMA_HEAVY_ASSERT(pBlock->Validate());
11489  pBlock->m_pMetadata->AddPoolStats(*pStats);
11490  }
11491 }
11492 
11493 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11494 {
11495  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11496  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11497  (VMA_DEBUG_MARGIN > 0) &&
11498  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11499  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11500 }
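// Note: HOST_VISIBLE | HOST_COHERENT is required above because the magic
// values around allocations are written and verified through a plain CPU
// mapping of the block (see VmaDeviceMemoryBlock::Map()/Unmap()).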
11501 
11502 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11503 
11504 VkResult VmaBlockVector::Allocate(
11505  uint32_t currentFrameIndex,
11506  VkDeviceSize size,
11507  VkDeviceSize alignment,
11508  const VmaAllocationCreateInfo& createInfo,
11509  VmaSuballocationType suballocType,
11510  size_t allocationCount,
11511  VmaAllocation* pAllocations)
11512 {
11513  size_t allocIndex;
11514  VkResult res = VK_SUCCESS;
11515 
11516  if(IsCorruptionDetectionEnabled())
11517  {
11518  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11519  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11520  }
11521 
11522  {
11523  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11524  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11525  {
11526  res = AllocatePage(
11527  currentFrameIndex,
11528  size,
11529  alignment,
11530  createInfo,
11531  suballocType,
11532  pAllocations + allocIndex);
11533  if(res != VK_SUCCESS)
11534  {
11535  break;
11536  }
11537  }
11538  }
11539 
11540  if(res != VK_SUCCESS)
11541  {
11542  // Free all already created allocations.
11543  while(allocIndex--)
11544  {
11545  Free(pAllocations[allocIndex]);
11546  }
11547  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11548  }
11549 
11550  return res;
11551 }
11552 
11553 VkResult VmaBlockVector::AllocatePage(
11554  uint32_t currentFrameIndex,
11555  VkDeviceSize size,
11556  VkDeviceSize alignment,
11557  const VmaAllocationCreateInfo& createInfo,
11558  VmaSuballocationType suballocType,
11559  VmaAllocation* pAllocation)
11560 {
11561  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11562  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11563  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11564  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11565  const bool canCreateNewBlock =
11566  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11567  (m_Blocks.size() < m_MaxBlockCount);
11568  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11569 
11570  // If linear algorithm is used, canMakeOtherLost is available only when used as ring buffer,
11571  // which in turn is available only when maxBlockCount = 1.
11572  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11573  {
11574  canMakeOtherLost = false;
11575  }
11576 
11577  // Upper address can only be used with linear allocator and within single memory block.
11578  if(isUpperAddress &&
11579  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11580  {
11581  return VK_ERROR_FEATURE_NOT_PRESENT;
11582  }
11583 
11584  // Validate strategy.
11585  switch(strategy)
11586  {
11587  case 0:
11588  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11589  break;
11590  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11591  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11592  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11593  break;
11594  default:
11595  return VK_ERROR_FEATURE_NOT_PRESENT;
11596  }
11597 
11598  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11599  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11600  {
11601  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11602  }
11603 
11604  /*
11605  Under certain conditions, this whole section can be skipped for optimization, so
11606  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11607  e.g. for custom pools with linear algorithm.
11608  */
11609  if(!canMakeOtherLost || canCreateNewBlock)
11610  {
11611  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11612  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11613  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11614 
11615  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11616  {
11617  // Use only last block.
11618  if(!m_Blocks.empty())
11619  {
11620  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11621  VMA_ASSERT(pCurrBlock);
11622  VkResult res = AllocateFromBlock(
11623  pCurrBlock,
11624  currentFrameIndex,
11625  size,
11626  alignment,
11627  allocFlagsCopy,
11628  createInfo.pUserData,
11629  suballocType,
11630  strategy,
11631  pAllocation);
11632  if(res == VK_SUCCESS)
11633  {
11634  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11635  return VK_SUCCESS;
11636  }
11637  }
11638  }
11639  else
11640  {
11641  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11642  {
11643  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11644  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11645  {
11646  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11647  VMA_ASSERT(pCurrBlock);
11648  VkResult res = AllocateFromBlock(
11649  pCurrBlock,
11650  currentFrameIndex,
11651  size,
11652  alignment,
11653  allocFlagsCopy,
11654  createInfo.pUserData,
11655  suballocType,
11656  strategy,
11657  pAllocation);
11658  if(res == VK_SUCCESS)
11659  {
11660  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11661  return VK_SUCCESS;
11662  }
11663  }
11664  }
11665  else // WORST_FIT, FIRST_FIT
11666  {
11667  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11668  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11669  {
11670  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11671  VMA_ASSERT(pCurrBlock);
11672  VkResult res = AllocateFromBlock(
11673  pCurrBlock,
11674  currentFrameIndex,
11675  size,
11676  alignment,
11677  allocFlagsCopy,
11678  createInfo.pUserData,
11679  suballocType,
11680  strategy,
11681  pAllocation);
11682  if(res == VK_SUCCESS)
11683  {
11684  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11685  return VK_SUCCESS;
11686  }
11687  }
11688  }
11689  }
11690 
11691  // 2. Try to create new block.
11692  if(canCreateNewBlock)
11693  {
11694  // Calculate optimal size for new block.
11695  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11696  uint32_t newBlockSizeShift = 0;
11697  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11698 
11699  if(!m_ExplicitBlockSize)
11700  {
11701  // Allocate 1/8, 1/4, 1/2 as first blocks.
11702  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11703  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11704  {
11705  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11706  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11707  {
11708  newBlockSize = smallerNewBlockSize;
11709  ++newBlockSizeShift;
11710  }
11711  else
11712  {
11713  break;
11714  }
11715  }
11716  }
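// Worked example (illustrative numbers, not from the library): with
// m_PreferredBlockSize = 256 MiB, no existing blocks, and size = 10 MiB,
// the loop above halves 256 -> 128 -> 64 -> 32 MiB (the maximum of 3 shifts),
// because each half is still larger than every existing block and >= 2 * size.
// A later allocation, once a 32 MiB block exists, stops at 64 MiB, and so on.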
11717 
11718  size_t newBlockIndex = 0;
11719  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11720  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11721  if(!m_ExplicitBlockSize)
11722  {
11723  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11724  {
11725  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11726  if(smallerNewBlockSize >= size)
11727  {
11728  newBlockSize = smallerNewBlockSize;
11729  ++newBlockSizeShift;
11730  res = CreateBlock(newBlockSize, &newBlockIndex);
11731  }
11732  else
11733  {
11734  break;
11735  }
11736  }
11737  }
11738 
11739  if(res == VK_SUCCESS)
11740  {
11741  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11742  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11743 
11744  res = AllocateFromBlock(
11745  pBlock,
11746  currentFrameIndex,
11747  size,
11748  alignment,
11749  allocFlagsCopy,
11750  createInfo.pUserData,
11751  suballocType,
11752  strategy,
11753  pAllocation);
11754  if(res == VK_SUCCESS)
11755  {
11756  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11757  return VK_SUCCESS;
11758  }
11759  else
11760  {
11761  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11762  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11763  }
11764  }
11765  }
11766  }
11767 
11768  // 3. Try to allocate from existing blocks with making other allocations lost.
11769  if(canMakeOtherLost)
11770  {
11771  uint32_t tryIndex = 0;
11772  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11773  {
11774  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11775  VmaAllocationRequest bestRequest = {};
11776  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11777 
11778  // 1. Search existing allocations.
11779  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11780  {
11781  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11782  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11783  {
11784  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11785  VMA_ASSERT(pCurrBlock);
11786  VmaAllocationRequest currRequest = {};
11787  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11788  currentFrameIndex,
11789  m_FrameInUseCount,
11790  m_BufferImageGranularity,
11791  size,
11792  alignment,
11793  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11794  suballocType,
11795  canMakeOtherLost,
11796  strategy,
11797  &currRequest))
11798  {
11799  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11800  if(pBestRequestBlock == VMA_NULL ||
11801  currRequestCost < bestRequestCost)
11802  {
11803  pBestRequestBlock = pCurrBlock;
11804  bestRequest = currRequest;
11805  bestRequestCost = currRequestCost;
11806 
11807  if(bestRequestCost == 0)
11808  {
11809  break;
11810  }
11811  }
11812  }
11813  }
11814  }
11815  else // WORST_FIT, FIRST_FIT
11816  {
11817  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11818  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11819  {
11820  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11821  VMA_ASSERT(pCurrBlock);
11822  VmaAllocationRequest currRequest = {};
11823  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11824  currentFrameIndex,
11825  m_FrameInUseCount,
11826  m_BufferImageGranularity,
11827  size,
11828  alignment,
11829  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11830  suballocType,
11831  canMakeOtherLost,
11832  strategy,
11833  &currRequest))
11834  {
11835  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11836  if(pBestRequestBlock == VMA_NULL ||
11837  currRequestCost < bestRequestCost ||
11838  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11839  {
11840  pBestRequestBlock = pCurrBlock;
11841  bestRequest = currRequest;
11842  bestRequestCost = currRequestCost;
11843 
11844  if(bestRequestCost == 0 ||
11845  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11846  {
11847  break;
11848  }
11849  }
11850  }
11851  }
11852  }
11853 
11854  if(pBestRequestBlock != VMA_NULL)
11855  {
11856  if(mapped)
11857  {
11858  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11859  if(res != VK_SUCCESS)
11860  {
11861  return res;
11862  }
11863  }
11864 
11865  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11866  currentFrameIndex,
11867  m_FrameInUseCount,
11868  &bestRequest))
11869  {
11870  // We no longer have an empty block.
11871  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11872  {
11873  m_HasEmptyBlock = false;
11874  }
11875  // Allocate from this pBlock.
11876  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11877  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11878  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11879  (*pAllocation)->InitBlockAllocation(
11880  pBestRequestBlock,
11881  bestRequest.offset,
11882  alignment,
11883  size,
11884  suballocType,
11885  mapped,
11886  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11887  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11888  VMA_DEBUG_LOG(" Returned from existing block");
11889  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11890  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11891  {
11892  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11893  }
11894  if(IsCorruptionDetectionEnabled())
11895  {
11896  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11897  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11898  }
11899  return VK_SUCCESS;
11900  }
11901  // else: Some allocations must have been touched while we are here. Next try.
11902  }
11903  else
11904  {
11905  // Could not find place in any of the blocks - break outer loop.
11906  break;
11907  }
11908  }
11909  /* Maximum number of tries exceeded - a very unlikely event: many other
11910  threads are simultaneously touching the allocations, making it impossible to
11911  mark them as lost while we try to allocate. */
11912  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11913  {
11914  return VK_ERROR_TOO_MANY_OBJECTS;
11915  }
11916  }
11917 
11918  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11919 }
11920 
11921 void VmaBlockVector::Free(
11922  VmaAllocation hAllocation)
11923 {
11924  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11925 
11926  // Scope for lock.
11927  {
11928  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11929 
11930  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11931 
11932  if(IsCorruptionDetectionEnabled())
11933  {
11934  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11935  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11936  }
11937 
11938  if(hAllocation->IsPersistentMap())
11939  {
11940  pBlock->Unmap(m_hAllocator, 1);
11941  }
11942 
11943  pBlock->m_pMetadata->Free(hAllocation);
11944  VMA_HEAVY_ASSERT(pBlock->Validate());
11945 
11946  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11947 
11948  // pBlock became empty after this deallocation.
11949  if(pBlock->m_pMetadata->IsEmpty())
11950  {
11951  // We already have an empty block. We don't want two, so delete this one.
11952  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11953  {
11954  pBlockToDelete = pBlock;
11955  Remove(pBlock);
11956  }
11957  // This is now our first empty block.
11958  else
11959  {
11960  m_HasEmptyBlock = true;
11961  }
11962  }
11963  // pBlock didn't become empty, but we have another empty block - find and free that one.
11964  // (This is optional; a heuristic.)
11965  else if(m_HasEmptyBlock)
11966  {
11967  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11968  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11969  {
11970  pBlockToDelete = pLastBlock;
11971  m_Blocks.pop_back();
11972  m_HasEmptyBlock = false;
11973  }
11974  }
11975 
11976  IncrementallySortBlocks();
11977  }
11978 
11979  // Destruction of a free block. Deferred until this point, outside of the mutex
11980  // lock, for performance reasons.
11981  if(pBlockToDelete != VMA_NULL)
11982  {
11983  VMA_DEBUG_LOG(" Deleted empty allocation");
11984  pBlockToDelete->Destroy(m_hAllocator);
11985  vma_delete(m_hAllocator, pBlockToDelete);
11986  }
11987 }
11988 
11989 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11990 {
11991  VkDeviceSize result = 0;
11992  for(size_t i = m_Blocks.size(); i--; )
11993  {
11994  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11995  if(result >= m_PreferredBlockSize)
11996  {
11997  break;
11998  }
11999  }
12000  return result;
12001 }
12002 
12003 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12004 {
12005  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12006  {
12007  if(m_Blocks[blockIndex] == pBlock)
12008  {
12009  VmaVectorRemove(m_Blocks, blockIndex);
12010  return;
12011  }
12012  }
12013  VMA_ASSERT(0);
12014 }
12015 
12016 void VmaBlockVector::IncrementallySortBlocks()
12017 {
12018  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12019  {
12020  // Bubble sort only until first swap.
12021  for(size_t i = 1; i < m_Blocks.size(); ++i)
12022  {
12023  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12024  {
12025  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12026  return;
12027  }
12028  }
12029  }
12030 }
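// A minimal standalone illustration of the same idea (hypothetical helper, not
// part of the library): one bubble-sort step per call, with at most one swap,
// so the sequence converges to ascending order over repeated calls without
// ever paying for a full sort on the deallocation path.
static void VmaExample_IncrementalSortStep(VkDeviceSize* pKeys, size_t count)
{
    for(size_t i = 1; i < count; ++i)
    {
        if(pKeys[i - 1] > pKeys[i])
        {
            const VkDeviceSize tmp = pKeys[i - 1];
            pKeys[i - 1] = pKeys[i];
            pKeys[i] = tmp;
            return; // Only until first swap.
        }
    }
}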
12031 
12032 VkResult VmaBlockVector::AllocateFromBlock(
12033  VmaDeviceMemoryBlock* pBlock,
12034  uint32_t currentFrameIndex,
12035  VkDeviceSize size,
12036  VkDeviceSize alignment,
12037  VmaAllocationCreateFlags allocFlags,
12038  void* pUserData,
12039  VmaSuballocationType suballocType,
12040  uint32_t strategy,
12041  VmaAllocation* pAllocation)
12042 {
12043  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12044  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12045  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12046  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12047 
12048  VmaAllocationRequest currRequest = {};
12049  if(pBlock->m_pMetadata->CreateAllocationRequest(
12050  currentFrameIndex,
12051  m_FrameInUseCount,
12052  m_BufferImageGranularity,
12053  size,
12054  alignment,
12055  isUpperAddress,
12056  suballocType,
12057  false, // canMakeOtherLost
12058  strategy,
12059  &currRequest))
12060  {
12061  // Allocate from pBlock.
12062  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12063 
12064  if(mapped)
12065  {
12066  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12067  if(res != VK_SUCCESS)
12068  {
12069  return res;
12070  }
12071  }
12072 
12073  // We no longer have an empty block.
12074  if(pBlock->m_pMetadata->IsEmpty())
12075  {
12076  m_HasEmptyBlock = false;
12077  }
12078 
12079  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12080  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12081  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12082  (*pAllocation)->InitBlockAllocation(
12083  pBlock,
12084  currRequest.offset,
12085  alignment,
12086  size,
12087  suballocType,
12088  mapped,
12089  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12090  VMA_HEAVY_ASSERT(pBlock->Validate());
12091  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12092  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12093  {
12094  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12095  }
12096  if(IsCorruptionDetectionEnabled())
12097  {
12098  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12099  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12100  }
12101  return VK_SUCCESS;
12102  }
12103  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12104 }
12105 
12106 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12107 {
12108  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12109  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12110  allocInfo.allocationSize = blockSize;
12111  VkDeviceMemory mem = VK_NULL_HANDLE;
12112  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12113  if(res < 0)
12114  {
12115  return res;
12116  }
12117 
12118  // New VkDeviceMemory successfully created.
12119 
12120  // Create a new block object for it.
12121  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12122  pBlock->Init(
12123  m_hAllocator,
12124  m_hParentPool,
12125  m_MemoryTypeIndex,
12126  mem,
12127  allocInfo.allocationSize,
12128  m_NextBlockId++,
12129  m_Algorithm);
12130 
12131  m_Blocks.push_back(pBlock);
12132  if(pNewBlockIndex != VMA_NULL)
12133  {
12134  *pNewBlockIndex = m_Blocks.size() - 1;
12135  }
12136 
12137  return VK_SUCCESS;
12138 }
12139 
12140 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12141  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12142  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12143 {
12144  const size_t blockCount = m_Blocks.size();
12145  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12146 
12147  enum BLOCK_FLAG
12148  {
12149  BLOCK_FLAG_USED = 0x00000001,
12150  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12151  };
12152 
12153  struct BlockInfo
12154  {
12155  uint32_t flags;
12156  void* pMappedData;
12157  };
12158  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12159  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12160  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12161 
12162  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12163  const size_t moveCount = moves.size();
12164  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12165  {
12166  const VmaDefragmentationMove& move = moves[moveIndex];
12167  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12168  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12169  }
12170 
12171  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12172 
12173  // Go over all blocks. Get mapped pointer or map if necessary.
12174  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12175  {
12176  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12177  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12178  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12179  {
12180  currBlockInfo.pMappedData = pBlock->GetMappedData();
12181  // It is not originally mapped - map it.
12182  if(currBlockInfo.pMappedData == VMA_NULL)
12183  {
12184  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12185  if(pDefragCtx->res == VK_SUCCESS)
12186  {
12187  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12188  }
12189  }
12190  }
12191  }
12192 
12193  // Go over all moves. Do actual data transfer.
12194  if(pDefragCtx->res == VK_SUCCESS)
12195  {
12196  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12197  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12198 
12199  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12200  {
12201  const VmaDefragmentationMove& move = moves[moveIndex];
12202 
12203  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12204  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12205 
12206  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12207 
12208  // Invalidate source.
12209  if(isNonCoherent)
12210  {
12211  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12212  memRange.memory = pSrcBlock->GetDeviceMemory();
12213  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12214  memRange.size = VMA_MIN(
12215  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12216  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12217  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12218  }
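// Worked example (illustrative numbers): with nonCoherentAtomSize = 64,
// move.srcOffset = 100 and move.size = 50, this computes offset = 64 and
// size = VmaAlignUp(50 + 36, 64) = 128 (clamped to the end of the block),
// satisfying the alignment rules of vkInvalidateMappedMemoryRanges.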
12219 
12220  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12221  memmove(
12222  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12223  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12224  static_cast<size_t>(move.size));
12225 
12226  if(IsCorruptionDetectionEnabled())
12227  {
12228  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12229  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12230  }
12231 
12232  // Flush destination.
12233  if(isNonCoherent)
12234  {
12235  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12236  memRange.memory = pDstBlock->GetDeviceMemory();
12237  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12238  memRange.size = VMA_MIN(
12239  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12240  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12241  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12242  }
12243  }
12244  }
12245 
12246  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12247  // This runs regardless of whether pCtx->res == VK_SUCCESS.
12248  for(size_t blockIndex = blockCount; blockIndex--; )
12249  {
12250  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12251  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12252  {
12253  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12254  pBlock->Unmap(m_hAllocator, 1);
12255  }
12256  }
12257 }
12258 
12259 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12260  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12261  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12262  VkCommandBuffer commandBuffer)
12263 {
12264  const size_t blockCount = m_Blocks.size();
12265 
12266  pDefragCtx->blockContexts.resize(blockCount);
12267  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12268 
12269  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12270  const size_t moveCount = moves.size();
12271  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12272  {
12273  const VmaDefragmentationMove& move = moves[moveIndex];
12274  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12275  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12276  }
12277 
12278  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12279 
12280  // Go over all blocks. Create and bind buffer for whole block if necessary.
12281  {
12282  VkBufferCreateInfo bufCreateInfo;
12283  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12284 
12285  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12286  {
12287  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12288  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12289  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12290  {
12291  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12292  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12293  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12294  if(pDefragCtx->res == VK_SUCCESS)
12295  {
12296  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12297  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12298  }
12299  }
12300  }
12301  }
12302 
12303  // Go over all moves. Post data transfer commands to command buffer.
12304  if(pDefragCtx->res == VK_SUCCESS)
12305  {
12306  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12307  {
12308  const VmaDefragmentationMove& move = moves[moveIndex];
12309 
12310  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12311  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12312 
12313  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12314 
12315  VkBufferCopy region = {
12316  move.srcOffset,
12317  move.dstOffset,
12318  move.size };
12319  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12320  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12321  }
12322  }
12323 
12324  // Save buffers to defrag context for later destruction.
12325  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12326  {
12327  pDefragCtx->res = VK_NOT_READY;
12328  }
12329 }
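// Hedged caller-side sketch (public API identifiers, flow assumed by this
// function): the copies recorded above execute only after submission, so a
// caller would do roughly:
//
//   vkBeginCommandBuffer(cmdBuf, &beginInfo);
//   vmaDefragmentationBegin(allocator, &info, &stats, &ctx); // info.commandBuffer = cmdBuf
//   vkEndCommandBuffer(cmdBuf);
//   // ...submit cmdBuf and wait on a fence...
//   vmaDefragmentationEnd(allocator, ctx); // destroys the temporary buffers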
12330 
12331 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12332 {
12333  m_HasEmptyBlock = false;
12334  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12335  {
12336  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12337  if(pBlock->m_pMetadata->IsEmpty())
12338  {
12339  if(m_Blocks.size() > m_MinBlockCount)
12340  {
12341  if(pDefragmentationStats != VMA_NULL)
12342  {
12343  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12344  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12345  }
12346 
12347  VmaVectorRemove(m_Blocks, blockIndex);
12348  pBlock->Destroy(m_hAllocator);
12349  vma_delete(m_hAllocator, pBlock);
12350  }
12351  else
12352  {
12353  m_HasEmptyBlock = true;
12354  }
12355  }
12356  }
12357 }
12358 
12359 #if VMA_STATS_STRING_ENABLED
12360 
12361 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12362 {
12363  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12364 
12365  json.BeginObject();
12366 
12367  if(m_IsCustomPool)
12368  {
12369  json.WriteString("MemoryTypeIndex");
12370  json.WriteNumber(m_MemoryTypeIndex);
12371 
12372  json.WriteString("BlockSize");
12373  json.WriteNumber(m_PreferredBlockSize);
12374 
12375  json.WriteString("BlockCount");
12376  json.BeginObject(true);
12377  if(m_MinBlockCount > 0)
12378  {
12379  json.WriteString("Min");
12380  json.WriteNumber((uint64_t)m_MinBlockCount);
12381  }
12382  if(m_MaxBlockCount < SIZE_MAX)
12383  {
12384  json.WriteString("Max");
12385  json.WriteNumber((uint64_t)m_MaxBlockCount);
12386  }
12387  json.WriteString("Cur");
12388  json.WriteNumber((uint64_t)m_Blocks.size());
12389  json.EndObject();
12390 
12391  if(m_FrameInUseCount > 0)
12392  {
12393  json.WriteString("FrameInUseCount");
12394  json.WriteNumber(m_FrameInUseCount);
12395  }
12396 
12397  if(m_Algorithm != 0)
12398  {
12399  json.WriteString("Algorithm");
12400  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12401  }
12402  }
12403  else
12404  {
12405  json.WriteString("PreferredBlockSize");
12406  json.WriteNumber(m_PreferredBlockSize);
12407  }
12408 
12409  json.WriteString("Blocks");
12410  json.BeginObject();
12411  for(size_t i = 0; i < m_Blocks.size(); ++i)
12412  {
12413  json.BeginString();
12414  json.ContinueString(m_Blocks[i]->GetId());
12415  json.EndString();
12416 
12417  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12418  }
12419  json.EndObject();
12420 
12421  json.EndObject();
12422 }
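// For a custom pool, the emitted JSON has roughly the following shape
// (illustrative values; keys match the WriteString calls above):
//
//   { "MemoryTypeIndex": 2, "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Cur": 3 },
//     "Blocks": { "0": { ... }, "1": { ... }, "2": { ... } } }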
12423 
12424 #endif // #if VMA_STATS_STRING_ENABLED
12425 
12426 void VmaBlockVector::Defragment(
12427  class VmaBlockVectorDefragmentationContext* pCtx,
12428  VmaDefragmentationStats* pStats,
12429  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12430  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12431  VkCommandBuffer commandBuffer)
12432 {
12433  pCtx->res = VK_SUCCESS;
12434 
12435  const VkMemoryPropertyFlags memPropFlags =
12436  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12437  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12438  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12439 
12440  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12441  isHostVisible;
12442  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12443  !IsCorruptionDetectionEnabled() &&
12444  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12445 
12446  // There are options to defragment this memory type.
12447  if(canDefragmentOnCpu || canDefragmentOnGpu)
12448  {
12449  bool defragmentOnGpu;
12450  // There is only one option to defragment this memory type.
12451  if(canDefragmentOnGpu != canDefragmentOnCpu)
12452  {
12453  defragmentOnGpu = canDefragmentOnGpu;
12454  }
12455  // Both options are available: use heuristics to choose the best one.
12456  else
12457  {
12458  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12459  m_hAllocator->IsIntegratedGpu();
12460  }
12461 
12462  bool overlappingMoveSupported = !defragmentOnGpu;
12463 
12464  if(m_hAllocator->m_UseMutex)
12465  {
12466  m_Mutex.LockWrite();
12467  pCtx->mutexLocked = true;
12468  }
12469 
12470  pCtx->Begin(overlappingMoveSupported);
12471 
12472  // Defragment.
12473 
12474  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12475  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12476  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12477  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12478  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12479 
12480  // Accumulate statistics.
12481  if(pStats != VMA_NULL)
12482  {
12483  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12484  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12485  pStats->bytesMoved += bytesMoved;
12486  pStats->allocationsMoved += allocationsMoved;
12487  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12488  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12489  if(defragmentOnGpu)
12490  {
12491  maxGpuBytesToMove -= bytesMoved;
12492  maxGpuAllocationsToMove -= allocationsMoved;
12493  }
12494  else
12495  {
12496  maxCpuBytesToMove -= bytesMoved;
12497  maxCpuAllocationsToMove -= allocationsMoved;
12498  }
12499  }
12500 
12501  if(pCtx->res >= VK_SUCCESS)
12502  {
12503  if(defragmentOnGpu)
12504  {
12505  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12506  }
12507  else
12508  {
12509  ApplyDefragmentationMovesCpu(pCtx, moves);
12510  }
12511  }
12512  }
12513 }
12514 
12515 void VmaBlockVector::DefragmentationEnd(
12516  class VmaBlockVectorDefragmentationContext* pCtx,
12517  VmaDefragmentationStats* pStats)
12518 {
12519  // Destroy buffers.
12520  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12521  {
12522  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12523  if(blockCtx.hBuffer)
12524  {
12525  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12526  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12527  }
12528  }
12529 
12530  if(pCtx->res >= VK_SUCCESS)
12531  {
12532  FreeEmptyBlocks(pStats);
12533  }
12534 
12535  if(pCtx->mutexLocked)
12536  {
12537  VMA_ASSERT(m_hAllocator->m_UseMutex);
12538  m_Mutex.UnlockWrite();
12539  }
12540 }
12541 
12542 size_t VmaBlockVector::CalcAllocationCount() const
12543 {
12544  size_t result = 0;
12545  for(size_t i = 0; i < m_Blocks.size(); ++i)
12546  {
12547  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12548  }
12549  return result;
12550 }
12551 
12552 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12553 {
12554  if(m_BufferImageGranularity == 1)
12555  {
12556  return false;
12557  }
12558  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12559  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12560  {
12561  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12562  VMA_ASSERT(m_Algorithm == 0);
12563  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12564  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12565  {
12566  return true;
12567  }
12568  }
12569  return false;
12570 }
12571 
12572 void VmaBlockVector::MakePoolAllocationsLost(
12573  uint32_t currentFrameIndex,
12574  size_t* pLostAllocationCount)
12575 {
12576  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12577  size_t lostAllocationCount = 0;
12578  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12579  {
12580  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12581  VMA_ASSERT(pBlock);
12582  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12583  }
12584  if(pLostAllocationCount != VMA_NULL)
12585  {
12586  *pLostAllocationCount = lostAllocationCount;
12587  }
12588 }
12589 
12590 VkResult VmaBlockVector::CheckCorruption()
12591 {
12592  if(!IsCorruptionDetectionEnabled())
12593  {
12594  return VK_ERROR_FEATURE_NOT_PRESENT;
12595  }
12596 
12597  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12598  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12599  {
12600  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12601  VMA_ASSERT(pBlock);
12602  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12603  if(res != VK_SUCCESS)
12604  {
12605  return res;
12606  }
12607  }
12608  return VK_SUCCESS;
12609 }
12610 
12611 void VmaBlockVector::AddStats(VmaStats* pStats)
12612 {
12613  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12614  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12615 
12616  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12617 
12618  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12619  {
12620  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12621  VMA_ASSERT(pBlock);
12622  VMA_HEAVY_ASSERT(pBlock->Validate());
12623  VmaStatInfo allocationStatInfo;
12624  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12625  VmaAddStatInfo(pStats->total, allocationStatInfo);
12626  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12627  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12628  }
12629 }
12630 
12631 ////////////////////////////////////////////////////////////////////////////////
12632 // VmaDefragmentationAlgorithm_Generic members definition
12633 
12634 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12635  VmaAllocator hAllocator,
12636  VmaBlockVector* pBlockVector,
12637  uint32_t currentFrameIndex,
12638  bool overlappingMoveSupported) :
12639  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12640  m_AllocationCount(0),
12641  m_AllAllocations(false),
12642  m_BytesMoved(0),
12643  m_AllocationsMoved(0),
12644  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12645 {
12646  // Create block info for each block.
12647  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12648  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12649  {
12650  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12651  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12652  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12653  m_Blocks.push_back(pBlockInfo);
12654  }
12655 
12656  // Sort them by m_pBlock pointer value.
12657  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12658 }
12659 
12660 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12661 {
12662  for(size_t i = m_Blocks.size(); i--; )
12663  {
12664  vma_delete(m_hAllocator, m_Blocks[i]);
12665  }
12666 }
12667 
12668 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12669 {
12670  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
12671  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12672  {
12673  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12674  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12675  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12676  {
12677  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12678  (*it)->m_Allocations.push_back(allocInfo);
12679  }
12680  else
12681  {
12682  VMA_ASSERT(0);
12683  }
12684 
12685  ++m_AllocationCount;
12686  }
12687 }
12688 
12689 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12690  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12691  VkDeviceSize maxBytesToMove,
12692  uint32_t maxAllocationsToMove)
12693 {
12694  if(m_Blocks.empty())
12695  {
12696  return VK_SUCCESS;
12697  }
12698 
12699  // This is a choice based on research.
12700  // Option 1:
12701  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12702  // Option 2:
12703  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12704  // Option 3:
12705  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12706 
12707  size_t srcBlockMinIndex = 0;
12708  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12709  /*
12710  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12711  {
12712  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12713  if(blocksWithNonMovableCount > 0)
12714  {
12715  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12716  }
12717  }
12718  */
12719 
12720  size_t srcBlockIndex = m_Blocks.size() - 1;
12721  size_t srcAllocIndex = SIZE_MAX;
12722  for(;;)
12723  {
12724  // 1. Find next allocation to move.
12725  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12726  // 1.2. Then start from last to first m_Allocations.
12727  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12728  {
12729  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12730  {
12731  // Finished: no more allocations to process.
12732  if(srcBlockIndex == srcBlockMinIndex)
12733  {
12734  return VK_SUCCESS;
12735  }
12736  else
12737  {
12738  --srcBlockIndex;
12739  srcAllocIndex = SIZE_MAX;
12740  }
12741  }
12742  else
12743  {
12744  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12745  }
12746  }
12747 
12748  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12749  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12750 
12751  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12752  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12753  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12754  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12755 
12756  // 2. Try to find new place for this allocation in preceding or current block.
12757  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12758  {
12759  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12760  VmaAllocationRequest dstAllocRequest;
12761  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12762  m_CurrentFrameIndex,
12763  m_pBlockVector->GetFrameInUseCount(),
12764  m_pBlockVector->GetBufferImageGranularity(),
12765  size,
12766  alignment,
12767  false, // upperAddress
12768  suballocType,
12769  false, // canMakeOtherLost
12770  strategy,
12771  &dstAllocRequest) &&
12772  MoveMakesSense(
12773  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12774  {
12775  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12776 
12777  // Reached limit on number of allocations or bytes to move.
12778  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12779  (m_BytesMoved + size > maxBytesToMove))
12780  {
12781  return VK_SUCCESS;
12782  }
12783 
12784  VmaDefragmentationMove move;
12785  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12786  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12787  move.srcOffset = srcOffset;
12788  move.dstOffset = dstAllocRequest.offset;
12789  move.size = size;
12790  moves.push_back(move);
12791 
12792  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12793  dstAllocRequest,
12794  suballocType,
12795  size,
12796  allocInfo.m_hAllocation);
12797  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12798 
12799  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12800 
12801  if(allocInfo.m_pChanged != VMA_NULL)
12802  {
12803  *allocInfo.m_pChanged = VK_TRUE;
12804  }
12805 
12806  ++m_AllocationsMoved;
12807  m_BytesMoved += size;
12808 
12809  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12810 
12811  break;
12812  }
12813  }
12814 
12815  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12816 
12817  if(srcAllocIndex > 0)
12818  {
12819  --srcAllocIndex;
12820  }
12821  else
12822  {
12823  if(srcBlockIndex > 0)
12824  {
12825  --srcBlockIndex;
12826  srcAllocIndex = SIZE_MAX;
12827  }
12828  else
12829  {
12830  return VK_SUCCESS;
12831  }
12832  }
12833  }
12834 }
12835 
12836 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12837 {
12838  size_t result = 0;
12839  for(size_t i = 0; i < m_Blocks.size(); ++i)
12840  {
12841  if(m_Blocks[i]->m_HasNonMovableAllocations)
12842  {
12843  ++result;
12844  }
12845  }
12846  return result;
12847 }
12848 
12849 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12850  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12851  VkDeviceSize maxBytesToMove,
12852  uint32_t maxAllocationsToMove)
12853 {
12854  if(!m_AllAllocations && m_AllocationCount == 0)
12855  {
12856  return VK_SUCCESS;
12857  }
12858 
12859  const size_t blockCount = m_Blocks.size();
12860  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12861  {
12862  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12863 
12864  if(m_AllAllocations)
12865  {
12866  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12867  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12868  it != pMetadata->m_Suballocations.end();
12869  ++it)
12870  {
12871  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12872  {
12873  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12874  pBlockInfo->m_Allocations.push_back(allocInfo);
12875  }
12876  }
12877  }
12878 
12879  pBlockInfo->CalcHasNonMovableAllocations();
12880 
12881  // This is a choice based on research.
12882  // Option 1:
12883  pBlockInfo->SortAllocationsByOffsetDescending();
12884  // Option 2:
12885  //pBlockInfo->SortAllocationsBySizeDescending();
12886  }
12887 
12888  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12889  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12890 
12891  // This is a choice based on research.
12892  const uint32_t roundCount = 2;
12893 
12894  // Execute defragmentation rounds (the main part).
12895  VkResult result = VK_SUCCESS;
12896  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12897  {
12898  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12899  }
12900 
12901  return result;
12902 }
12903 
12904 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12905  size_t dstBlockIndex, VkDeviceSize dstOffset,
12906  size_t srcBlockIndex, VkDeviceSize srcOffset)
12907 {
12908  if(dstBlockIndex < srcBlockIndex)
12909  {
12910  return true;
12911  }
12912  if(dstBlockIndex > srcBlockIndex)
12913  {
12914  return false;
12915  }
12916  if(dstOffset < srcOffset)
12917  {
12918  return true;
12919  }
12920  return false;
12921 }
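// Equivalently: a move makes sense only when (dstBlockIndex, dstOffset) is
// lexicographically smaller than (srcBlockIndex, srcOffset), so data only ever
// moves toward the front and the defragmentation rounds make steady progress.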
12922 
12923 ////////////////////////////////////////////////////////////////////////////////
12924 // VmaDefragmentationAlgorithm_Fast
12925 
12926 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12927  VmaAllocator hAllocator,
12928  VmaBlockVector* pBlockVector,
12929  uint32_t currentFrameIndex,
12930  bool overlappingMoveSupported) :
12931  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12932  m_OverlappingMoveSupported(overlappingMoveSupported),
12933  m_AllocationCount(0),
12934  m_AllAllocations(false),
12935  m_BytesMoved(0),
12936  m_AllocationsMoved(0),
12937  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12938 {
12939  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12940 
12941 }
12942 
12943 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12944 {
12945 }
12946 
12947 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12948  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12949  VkDeviceSize maxBytesToMove,
12950  uint32_t maxAllocationsToMove)
12951 {
12952  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12953 
12954  const size_t blockCount = m_pBlockVector->GetBlockCount();
12955  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12956  {
12957  return VK_SUCCESS;
12958  }
12959 
12960  PreprocessMetadata();
12961 
12962  // Sort blocks in order from most "destination" to most "source".
12963 
12964  m_BlockInfos.resize(blockCount);
12965  for(size_t i = 0; i < blockCount; ++i)
12966  {
12967  m_BlockInfos[i].origBlockIndex = i;
12968  }
12969 
12970  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12971  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12972  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12973  });
12974 
12975  // THE MAIN ALGORITHM
12976 
12977  FreeSpaceDatabase freeSpaceDb;
12978 
12979  size_t dstBlockInfoIndex = 0;
12980  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12981  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12982  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12983  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12984  VkDeviceSize dstOffset = 0;
12985 
12986  bool end = false;
12987  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12988  {
12989  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12990  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12991  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12992  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12993  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12994  {
12995  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12996  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12997  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12998  if(m_AllocationsMoved == maxAllocationsToMove ||
12999  m_BytesMoved + srcAllocSize > maxBytesToMove)
13000  {
13001  end = true;
13002  break;
13003  }
13004  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13005 
13006  // Try to place it in one of the free spaces from the database.
13007  size_t freeSpaceInfoIndex;
13008  VkDeviceSize dstAllocOffset;
13009  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13010  freeSpaceInfoIndex, dstAllocOffset))
13011  {
13012  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13013  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13014  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13015 
13016  // Same block
13017  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13018  {
13019  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13020 
13021  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13022 
13023  VmaSuballocation suballoc = *srcSuballocIt;
13024  suballoc.offset = dstAllocOffset;
13025  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13026  m_BytesMoved += srcAllocSize;
13027  ++m_AllocationsMoved;
13028 
13029  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13030  ++nextSuballocIt;
13031  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13032  srcSuballocIt = nextSuballocIt;
13033 
13034  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13035 
13036  VmaDefragmentationMove move = {
13037  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13038  srcAllocOffset, dstAllocOffset,
13039  srcAllocSize };
13040  moves.push_back(move);
13041  }
13042  // Different block
13043  else
13044  {
13045  // MOVE OPTION 2: Move the allocation to a different block.
13046 
13047  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13048 
13049  VmaSuballocation suballoc = *srcSuballocIt;
13050  suballoc.offset = dstAllocOffset;
13051  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13052  m_BytesMoved += srcAllocSize;
13053  ++m_AllocationsMoved;
13054 
13055  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13056  ++nextSuballocIt;
13057  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13058  srcSuballocIt = nextSuballocIt;
13059 
13060  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13061 
13062  VmaDefragmentationMove move = {
13063  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13064  srcAllocOffset, dstAllocOffset,
13065  srcAllocSize };
13066  moves.push_back(move);
13067  }
13068  }
13069  else
13070  {
13071  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13072 
13073  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
13074  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13075  dstAllocOffset + srcAllocSize > dstBlockSize)
13076  {
13077  // But before that, register remaining free space at the end of dst block.
13078  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13079 
13080  ++dstBlockInfoIndex;
13081  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13082  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13083  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13084  dstBlockSize = pDstMetadata->GetSize();
13085  dstOffset = 0;
13086  dstAllocOffset = 0;
13087  }
13088 
13089  // Same block
13090  if(dstBlockInfoIndex == srcBlockInfoIndex)
13091  {
13092  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13093 
13094  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13095 
13096  bool skipOver = overlap;
13097  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13098  {
13099  // If destination and source places overlap, skip the move if it would
13100  // shift the allocation by less than 1/64 of its size.
13101  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13102  }
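// E.g. a 64 MiB allocation is moved within its own block only if doing so
// gains at least 1 MiB; smaller gains are not worth the copy, so the gap is
// registered in freeSpaceDb for later reuse instead.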
13103 
13104  if(skipOver)
13105  {
13106  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13107 
13108  dstOffset = srcAllocOffset + srcAllocSize;
13109  ++srcSuballocIt;
13110  }
13111  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13112  else
13113  {
13114  srcSuballocIt->offset = dstAllocOffset;
13115  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13116  dstOffset = dstAllocOffset + srcAllocSize;
13117  m_BytesMoved += srcAllocSize;
13118  ++m_AllocationsMoved;
13119  ++srcSuballocIt;
13120  VmaDefragmentationMove move = {
13121  srcOrigBlockIndex, dstOrigBlockIndex,
13122  srcAllocOffset, dstAllocOffset,
13123  srcAllocSize };
13124  moves.push_back(move);
13125  }
13126  }
13127  // Different block
13128  else
13129  {
13130  // MOVE OPTION 2: Move the allocation to a different block.
13131 
13132  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13133  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13134 
13135  VmaSuballocation suballoc = *srcSuballocIt;
13136  suballoc.offset = dstAllocOffset;
13137  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13138  dstOffset = dstAllocOffset + srcAllocSize;
13139  m_BytesMoved += srcAllocSize;
13140  ++m_AllocationsMoved;
13141 
13142  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13143  ++nextSuballocIt;
13144  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13145  srcSuballocIt = nextSuballocIt;
13146 
13147  pDstMetadata->m_Suballocations.push_back(suballoc);
13148 
13149  VmaDefragmentationMove move = {
13150  srcOrigBlockIndex, dstOrigBlockIndex,
13151  srcAllocOffset, dstAllocOffset,
13152  srcAllocSize };
13153  moves.push_back(move);
13154  }
13155  }
13156  }
13157  }
13158 
13159  m_BlockInfos.clear();
13160 
13161  PostprocessMetadata();
13162 
13163  return VK_SUCCESS;
13164 }
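// Taken together, this is a single forward compaction pass: a write cursor
// (dstBlockInfoIndex/dstOffset) sweeps the blocks in order of increasing free
// space, and each live allocation is either slid down within its block, moved
// into an earlier gap remembered in freeSpaceDb, or skipped when the gain is
// negligible.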
13165 
13166 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13167 {
13168  const size_t blockCount = m_pBlockVector->GetBlockCount();
13169  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13170  {
13171  VmaBlockMetadata_Generic* const pMetadata =
13172  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13173  pMetadata->m_FreeCount = 0;
13174  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13175  pMetadata->m_FreeSuballocationsBySize.clear();
13176  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13177  it != pMetadata->m_Suballocations.end(); )
13178  {
13179  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13180  {
13181  VmaSuballocationList::iterator nextIt = it;
13182  ++nextIt;
13183  pMetadata->m_Suballocations.erase(it);
13184  it = nextIt;
13185  }
13186  else
13187  {
13188  ++it;
13189  }
13190  }
13191  }
13192 }
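// After this pass each block's suballocation list holds only the live
// allocations, in offset order; the free gaps become implicit and are
// re-materialized by PostprocessMetadata() once the moves have been applied.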
13193 
13194 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13195 {
13196  const size_t blockCount = m_pBlockVector->GetBlockCount();
13197  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13198  {
13199  VmaBlockMetadata_Generic* const pMetadata =
13200  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13201  const VkDeviceSize blockSize = pMetadata->GetSize();
13202 
13203  // No allocations in this block - entire area is free.
13204  if(pMetadata->m_Suballocations.empty())
13205  {
13206  pMetadata->m_FreeCount = 1;
13207  //pMetadata->m_SumFreeSize is already set to blockSize.
13208  VmaSuballocation suballoc = {
13209  0, // offset
13210  blockSize, // size
13211  VMA_NULL, // hAllocation
13212  VMA_SUBALLOCATION_TYPE_FREE };
13213  pMetadata->m_Suballocations.push_back(suballoc);
13214  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13215  }
13216  // There are some allocations in this block.
13217  else
13218  {
13219  VkDeviceSize offset = 0;
13220  VmaSuballocationList::iterator it;
13221  for(it = pMetadata->m_Suballocations.begin();
13222  it != pMetadata->m_Suballocations.end();
13223  ++it)
13224  {
13225  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13226  VMA_ASSERT(it->offset >= offset);
13227 
13228  // Need to insert preceding free space.
13229  if(it->offset > offset)
13230  {
13231  ++pMetadata->m_FreeCount;
13232  const VkDeviceSize freeSize = it->offset - offset;
13233  VmaSuballocation suballoc = {
13234  offset, // offset
13235  freeSize, // size
13236  VMA_NULL, // hAllocation
13237  VMA_SUBALLOCATION_TYPE_FREE };
13238  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13239  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13240  {
13241  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13242  }
13243  }
13244 
13245  pMetadata->m_SumFreeSize -= it->size;
13246  offset = it->offset + it->size;
13247  }
13248 
13249  // Need to insert trailing free space.
13250  if(offset < blockSize)
13251  {
13252  ++pMetadata->m_FreeCount;
13253  const VkDeviceSize freeSize = blockSize - offset;
13254  VmaSuballocation suballoc = {
13255  offset, // offset
13256  freeSize, // size
13257  VMA_NULL, // hAllocation
13258  VMA_SUBALLOCATION_TYPE_FREE };
13259  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13260  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13261  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13262  {
13263  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13264  }
13265  }
13266 
13267  VMA_SORT(
13268  pMetadata->m_FreeSuballocationsBySize.begin(),
13269  pMetadata->m_FreeSuballocationsBySize.end(),
13270  VmaSuballocationItemSizeLess());
13271  }
13272 
13273  VMA_HEAVY_ASSERT(pMetadata->Validate());
13274  }
13275 }
13276 
13277 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13278 {
13279  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13280  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13281  // Advance to the first suballocation whose offset is not smaller, keeping
13282  // the list sorted by offset, then insert before it.
13283  while(it != pMetadata->m_Suballocations.end() &&
13284  it->offset < suballoc.offset)
13285  {
13286  ++it;
13287  }
13288  pMetadata->m_Suballocations.insert(it, suballoc);
13289 }
13290 
13291 ////////////////////////////////////////////////////////////////////////////////
13292 // VmaBlockVectorDefragmentationContext
13293 
13294 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13295  VmaAllocator hAllocator,
13296  VmaPool hCustomPool,
13297  VmaBlockVector* pBlockVector,
13298  uint32_t currFrameIndex,
13299  uint32_t algorithmFlags) :
13300  res(VK_SUCCESS),
13301  mutexLocked(false),
13302  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13303  m_hAllocator(hAllocator),
13304  m_hCustomPool(hCustomPool),
13305  m_pBlockVector(pBlockVector),
13306  m_CurrFrameIndex(currFrameIndex),
13307  m_AlgorithmFlags(algorithmFlags),
13308  m_pAlgorithm(VMA_NULL),
13309  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13310  m_AllAllocations(false)
13311 {
13312 }
13313 
13314 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13315 {
13316  vma_delete(m_hAllocator, m_pAlgorithm);
13317 }
13318 
13319 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13320 {
13321  AllocInfo info = { hAlloc, pChanged };
13322  m_Allocations.push_back(info);
13323 }
13324 
13325 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13326 {
13327  const bool allAllocations = m_AllAllocations ||
13328  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13329 
13330  /********************************
13331  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13332  ********************************/
13333 
13334  /*
13335  Fast algorithm is supported only when certain criteria are met:
13336  - VMA_DEBUG_MARGIN is 0.
13337  - All allocations in this block vector are moveable.
13338  - There is no possibility of image/buffer granularity conflict.
13339  */
13340  if(VMA_DEBUG_MARGIN == 0 &&
13341  allAllocations &&
13342  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13343  {
13344  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13345  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13346  }
13347  else
13348  {
13349  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13350  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13351  }
13352 
13353  if(allAllocations)
13354  {
13355  m_pAlgorithm->AddAll();
13356  }
13357  else
13358  {
13359  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13360  {
13361  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13362  }
13363  }
13364 }
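/*
For illustration: with the default configuration VMA_DEBUG_MARGIN is 0, so
requesting defragmentation of every allocation in a block vector that holds
only buffers (no image/buffer granularity conflict possible) selects
VmaDefragmentationAlgorithm_Fast above; defining VMA_DEBUG_MARGIN to a non-zero
value, or passing only a subset of the allocations, falls back to
VmaDefragmentationAlgorithm_Generic.
*/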
13365 
13367 // VmaDefragmentationContext
13368 
13369 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13370  VmaAllocator hAllocator,
13371  uint32_t currFrameIndex,
13372  uint32_t flags,
13373  VmaDefragmentationStats* pStats) :
13374  m_hAllocator(hAllocator),
13375  m_CurrFrameIndex(currFrameIndex),
13376  m_Flags(flags),
13377  m_pStats(pStats),
13378  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13379 {
13380  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13381 }
13382 
13383 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13384 {
13385  for(size_t i = m_CustomPoolContexts.size(); i--; )
13386  {
13387  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13388  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13389  vma_delete(m_hAllocator, pBlockVectorCtx);
13390  }
13391  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13392  {
13393  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13394  if(pBlockVectorCtx)
13395  {
13396  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13397  vma_delete(m_hAllocator, pBlockVectorCtx);
13398  }
13399  }
13400 }
13401 
13402 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13403 {
13404  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13405  {
13406  VmaPool pool = pPools[poolIndex];
13407  VMA_ASSERT(pool);
13408  // Pools with algorithm other than default are not defragmented.
13409  if(pool->m_BlockVector.GetAlgorithm() == 0)
13410  {
13411  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13412 
13413  for(size_t i = m_CustomPoolContexts.size(); i--; )
13414  {
13415  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13416  {
13417  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13418  break;
13419  }
13420  }
13421 
13422  if(!pBlockVectorDefragCtx)
13423  {
13424  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13425  m_hAllocator,
13426  pool,
13427  &pool->m_BlockVector,
13428  m_CurrFrameIndex,
13429  m_Flags);
13430  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13431  }
13432 
13433  pBlockVectorDefragCtx->AddAll();
13434  }
13435  }
13436 }
13437 
13438 void VmaDefragmentationContext_T::AddAllocations(
13439  uint32_t allocationCount,
13440  VmaAllocation* pAllocations,
13441  VkBool32* pAllocationsChanged)
13442 {
13443  // Dispatch pAllocations among defragmentators. Create them when necessary.
13444  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13445  {
13446  const VmaAllocation hAlloc = pAllocations[allocIndex];
13447  VMA_ASSERT(hAlloc);
13448  // DedicatedAlloc cannot be defragmented.
13449  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13450  // Lost allocation cannot be defragmented.
13451  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13452  {
13453  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13454 
13455  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13456  // This allocation belongs to custom pool.
13457  if(hAllocPool != VK_NULL_HANDLE)
13458  {
13459  // Pools with algorithm other than default are not defragmented.
13460  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13461  {
13462  for(size_t i = m_CustomPoolContexts.size(); i--; )
13463  {
13464  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13465  {
13466  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13467  break;
13468  }
13469  }
13470  if(!pBlockVectorDefragCtx)
13471  {
13472  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13473  m_hAllocator,
13474  hAllocPool,
13475  &hAllocPool->m_BlockVector,
13476  m_CurrFrameIndex,
13477  m_Flags);
13478  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13479  }
13480  }
13481  }
13482  // This allocation belongs to default pool.
13483  else
13484  {
13485  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13486  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13487  if(!pBlockVectorDefragCtx)
13488  {
13489  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13490  m_hAllocator,
13491  VMA_NULL, // hCustomPool
13492  m_hAllocator->m_pBlockVectors[memTypeIndex],
13493  m_CurrFrameIndex,
13494  m_Flags);
13495  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13496  }
13497  }
13498 
13499  if(pBlockVectorDefragCtx)
13500  {
13501  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13502  &pAllocationsChanged[allocIndex] : VMA_NULL;
13503  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13504  }
13505  }
13506  }
13507 }
13508 
13509 VkResult VmaDefragmentationContext_T::Defragment(
13510  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13511  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13512  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13513 {
13514  if(pStats)
13515  {
13516  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13517  }
13518 
13519  if(commandBuffer == VK_NULL_HANDLE)
13520  {
13521  maxGpuBytesToMove = 0;
13522  maxGpuAllocationsToMove = 0;
13523  }
13524 
13525  VkResult res = VK_SUCCESS;
13526 
13527  // Process default pools.
13528  for(uint32_t memTypeIndex = 0;
13529  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13530  ++memTypeIndex)
13531  {
13532  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13533  if(pBlockVectorCtx)
13534  {
13535  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13536  pBlockVectorCtx->GetBlockVector()->Defragment(
13537  pBlockVectorCtx,
13538  pStats,
13539  maxCpuBytesToMove, maxCpuAllocationsToMove,
13540  maxGpuBytesToMove, maxGpuAllocationsToMove,
13541  commandBuffer);
13542  if(pBlockVectorCtx->res != VK_SUCCESS)
13543  {
13544  res = pBlockVectorCtx->res;
13545  }
13546  }
13547  }
13548 
13549  // Process custom pools.
13550  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13551  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13552  ++customCtxIndex)
13553  {
13554  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13555  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13556  pBlockVectorCtx->GetBlockVector()->Defragment(
13557  pBlockVectorCtx,
13558  pStats,
13559  maxCpuBytesToMove, maxCpuAllocationsToMove,
13560  maxGpuBytesToMove, maxGpuAllocationsToMove,
13561  commandBuffer);
13562  if(pBlockVectorCtx->res != VK_SUCCESS)
13563  {
13564  res = pBlockVectorCtx->res;
13565  }
13566  }
13567 
13568  return res;
13569 }
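/*
A typical driver of this function through the public API (a sketch; 'allocator'
and 'allocs' are assumed to exist, and 'cmdBuf' must be a command buffer in the
recording state if GPU-side moves are wanted):

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = (uint32_t)allocs.size();
    info.pAllocations = allocs.data();
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;
    info.maxGpuBytesToMove = VK_WHOLE_SIZE;
    info.maxGpuAllocationsToMove = UINT32_MAX;
    info.commandBuffer = cmdBuf; // VK_NULL_HANDLE zeroes the GPU limits, as above.

    VmaDefragmentationContext defragCtx;
    VmaDefragmentationStats stats = {};
    vmaDefragmentationBegin(allocator, &info, &stats, &defragCtx);
    // ... submit and wait for 'cmdBuf' if any GPU moves were recorded ...
    vmaDefragmentationEnd(allocator, defragCtx);
*/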
13570 
13572 // VmaRecorder
13573 
13574 #if VMA_RECORDING_ENABLED
13575 
13576 VmaRecorder::VmaRecorder() :
13577  m_UseMutex(true),
13578  m_Flags(0),
13579  m_File(VMA_NULL),
13580  m_Freq(INT64_MAX),
13581  m_StartCounter(INT64_MAX)
13582 {
13583 }
13584 
13585 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13586 {
13587  m_UseMutex = useMutex;
13588  m_Flags = settings.flags;
13589 
13590  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13591  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13592 
13593  // Open file for writing.
13594  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13595  if(err != 0)
13596  {
13597  return VK_ERROR_INITIALIZATION_FAILED;
13598  }
13599 
13600  // Write header.
13601  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13602  fprintf(m_File, "%s\n", "1,5");
13603 
13604  return VK_SUCCESS;
13605 }
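/*
Recording is turned on from the public API (a sketch; effective only where
VMA_RECORDING_ENABLED is 1, i.e. on Windows by default; the file path is an
arbitrary example):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // optional
    recordSettings.pFilePath = "vma_record.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... physicalDevice, device, etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/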
13606 
13607 VmaRecorder::~VmaRecorder()
13608 {
13609  if(m_File != VMA_NULL)
13610  {
13611  fclose(m_File);
13612  }
13613 }
13614 
13615 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13616 {
13617  CallParams callParams;
13618  GetBasicParams(callParams);
13619 
13620  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13621  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13622  Flush();
13623 }
13624 
13625 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13626 {
13627  CallParams callParams;
13628  GetBasicParams(callParams);
13629 
13630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13631  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13632  Flush();
13633 }
13634 
13635 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13636 {
13637  CallParams callParams;
13638  GetBasicParams(callParams);
13639 
13640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13641  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13642  createInfo.memoryTypeIndex,
13643  createInfo.flags,
13644  createInfo.blockSize,
13645  (uint64_t)createInfo.minBlockCount,
13646  (uint64_t)createInfo.maxBlockCount,
13647  createInfo.frameInUseCount,
13648  pool);
13649  Flush();
13650 }
13651 
13652 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13653 {
13654  CallParams callParams;
13655  GetBasicParams(callParams);
13656 
13657  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13658  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13659  pool);
13660  Flush();
13661 }
13662 
13663 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13664  const VkMemoryRequirements& vkMemReq,
13665  const VmaAllocationCreateInfo& createInfo,
13666  VmaAllocation allocation)
13667 {
13668  CallParams callParams;
13669  GetBasicParams(callParams);
13670 
13671  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13672  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13673  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13674  vkMemReq.size,
13675  vkMemReq.alignment,
13676  vkMemReq.memoryTypeBits,
13677  createInfo.flags,
13678  createInfo.usage,
13679  createInfo.requiredFlags,
13680  createInfo.preferredFlags,
13681  createInfo.memoryTypeBits,
13682  createInfo.pool,
13683  allocation,
13684  userDataStr.GetString());
13685  Flush();
13686 }
13687 
13688 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13689  const VkMemoryRequirements& vkMemReq,
13690  const VmaAllocationCreateInfo& createInfo,
13691  uint64_t allocationCount,
13692  const VmaAllocation* pAllocations)
13693 {
13694  CallParams callParams;
13695  GetBasicParams(callParams);
13696 
13697  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13698  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13699  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13700  vkMemReq.size,
13701  vkMemReq.alignment,
13702  vkMemReq.memoryTypeBits,
13703  createInfo.flags,
13704  createInfo.usage,
13705  createInfo.requiredFlags,
13706  createInfo.preferredFlags,
13707  createInfo.memoryTypeBits,
13708  createInfo.pool);
13709  PrintPointerList(allocationCount, pAllocations);
13710  fprintf(m_File, ",%s\n", userDataStr.GetString());
13711  Flush();
13712 }
13713 
13714 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13715  const VkMemoryRequirements& vkMemReq,
13716  bool requiresDedicatedAllocation,
13717  bool prefersDedicatedAllocation,
13718  const VmaAllocationCreateInfo& createInfo,
13719  VmaAllocation allocation)
13720 {
13721  CallParams callParams;
13722  GetBasicParams(callParams);
13723 
13724  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13725  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13726  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13727  vkMemReq.size,
13728  vkMemReq.alignment,
13729  vkMemReq.memoryTypeBits,
13730  requiresDedicatedAllocation ? 1 : 0,
13731  prefersDedicatedAllocation ? 1 : 0,
13732  createInfo.flags,
13733  createInfo.usage,
13734  createInfo.requiredFlags,
13735  createInfo.preferredFlags,
13736  createInfo.memoryTypeBits,
13737  createInfo.pool,
13738  allocation,
13739  userDataStr.GetString());
13740  Flush();
13741 }
13742 
13743 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13744  const VkMemoryRequirements& vkMemReq,
13745  bool requiresDedicatedAllocation,
13746  bool prefersDedicatedAllocation,
13747  const VmaAllocationCreateInfo& createInfo,
13748  VmaAllocation allocation)
13749 {
13750  CallParams callParams;
13751  GetBasicParams(callParams);
13752 
13753  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13754  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13755  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13756  vkMemReq.size,
13757  vkMemReq.alignment,
13758  vkMemReq.memoryTypeBits,
13759  requiresDedicatedAllocation ? 1 : 0,
13760  prefersDedicatedAllocation ? 1 : 0,
13761  createInfo.flags,
13762  createInfo.usage,
13763  createInfo.requiredFlags,
13764  createInfo.preferredFlags,
13765  createInfo.memoryTypeBits,
13766  createInfo.pool,
13767  allocation,
13768  userDataStr.GetString());
13769  Flush();
13770 }
13771 
13772 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13773  VmaAllocation allocation)
13774 {
13775  CallParams callParams;
13776  GetBasicParams(callParams);
13777 
13778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13779  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13780  allocation);
13781  Flush();
13782 }
13783 
13784 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13785  uint64_t allocationCount,
13786  const VmaAllocation* pAllocations)
13787 {
13788  CallParams callParams;
13789  GetBasicParams(callParams);
13790 
13791  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13792  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13793  PrintPointerList(allocationCount, pAllocations);
13794  fprintf(m_File, "\n");
13795  Flush();
13796 }
13797 
13798 void VmaRecorder::RecordResizeAllocation(
13799  uint32_t frameIndex,
13800  VmaAllocation allocation,
13801  VkDeviceSize newSize)
13802 {
13803  CallParams callParams;
13804  GetBasicParams(callParams);
13805 
13806  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13807  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13808  allocation, newSize);
13809  Flush();
13810 }
13811 
13812 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13813  VmaAllocation allocation,
13814  const void* pUserData)
13815 {
13816  CallParams callParams;
13817  GetBasicParams(callParams);
13818 
13819  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13820  UserDataString userDataStr(
13821  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13822  pUserData);
13823  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13824  allocation,
13825  userDataStr.GetString());
13826  Flush();
13827 }
13828 
13829 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13830  VmaAllocation allocation)
13831 {
13832  CallParams callParams;
13833  GetBasicParams(callParams);
13834 
13835  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13836  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13837  allocation);
13838  Flush();
13839 }
13840 
13841 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13842  VmaAllocation allocation)
13843 {
13844  CallParams callParams;
13845  GetBasicParams(callParams);
13846 
13847  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13848  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13849  allocation);
13850  Flush();
13851 }
13852 
13853 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13854  VmaAllocation allocation)
13855 {
13856  CallParams callParams;
13857  GetBasicParams(callParams);
13858 
13859  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13860  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13861  allocation);
13862  Flush();
13863 }
13864 
13865 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13866  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13867 {
13868  CallParams callParams;
13869  GetBasicParams(callParams);
13870 
13871  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13872  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13873  allocation,
13874  offset,
13875  size);
13876  Flush();
13877 }
13878 
13879 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13880  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13881 {
13882  CallParams callParams;
13883  GetBasicParams(callParams);
13884 
13885  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13886  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13887  allocation,
13888  offset,
13889  size);
13890  Flush();
13891 }
13892 
13893 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13894  const VkBufferCreateInfo& bufCreateInfo,
13895  const VmaAllocationCreateInfo& allocCreateInfo,
13896  VmaAllocation allocation)
13897 {
13898  CallParams callParams;
13899  GetBasicParams(callParams);
13900 
13901  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13902  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13903  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13904  bufCreateInfo.flags,
13905  bufCreateInfo.size,
13906  bufCreateInfo.usage,
13907  bufCreateInfo.sharingMode,
13908  allocCreateInfo.flags,
13909  allocCreateInfo.usage,
13910  allocCreateInfo.requiredFlags,
13911  allocCreateInfo.preferredFlags,
13912  allocCreateInfo.memoryTypeBits,
13913  allocCreateInfo.pool,
13914  allocation,
13915  userDataStr.GetString());
13916  Flush();
13917 }
13918 
13919 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13920  const VkImageCreateInfo& imageCreateInfo,
13921  const VmaAllocationCreateInfo& allocCreateInfo,
13922  VmaAllocation allocation)
13923 {
13924  CallParams callParams;
13925  GetBasicParams(callParams);
13926 
13927  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13928  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13929  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13930  imageCreateInfo.flags,
13931  imageCreateInfo.imageType,
13932  imageCreateInfo.format,
13933  imageCreateInfo.extent.width,
13934  imageCreateInfo.extent.height,
13935  imageCreateInfo.extent.depth,
13936  imageCreateInfo.mipLevels,
13937  imageCreateInfo.arrayLayers,
13938  imageCreateInfo.samples,
13939  imageCreateInfo.tiling,
13940  imageCreateInfo.usage,
13941  imageCreateInfo.sharingMode,
13942  imageCreateInfo.initialLayout,
13943  allocCreateInfo.flags,
13944  allocCreateInfo.usage,
13945  allocCreateInfo.requiredFlags,
13946  allocCreateInfo.preferredFlags,
13947  allocCreateInfo.memoryTypeBits,
13948  allocCreateInfo.pool,
13949  allocation,
13950  userDataStr.GetString());
13951  Flush();
13952 }
13953 
13954 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13955  VmaAllocation allocation)
13956 {
13957  CallParams callParams;
13958  GetBasicParams(callParams);
13959 
13960  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13961  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13962  allocation);
13963  Flush();
13964 }
13965 
13966 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13967  VmaAllocation allocation)
13968 {
13969  CallParams callParams;
13970  GetBasicParams(callParams);
13971 
13972  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13973  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13974  allocation);
13975  Flush();
13976 }
13977 
13978 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13979  VmaAllocation allocation)
13980 {
13981  CallParams callParams;
13982  GetBasicParams(callParams);
13983 
13984  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13985  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13986  allocation);
13987  Flush();
13988 }
13989 
13990 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13991  VmaAllocation allocation)
13992 {
13993  CallParams callParams;
13994  GetBasicParams(callParams);
13995 
13996  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13997  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13998  allocation);
13999  Flush();
14000 }
14001 
14002 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14003  VmaPool pool)
14004 {
14005  CallParams callParams;
14006  GetBasicParams(callParams);
14007 
14008  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14009  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14010  pool);
14011  Flush();
14012 }
14013 
14014 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14015  const VmaDefragmentationInfo2& info,
14016  VmaDefragmentationContext ctx)
14017 {
14018  CallParams callParams;
14019  GetBasicParams(callParams);
14020 
14021  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14022  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14023  info.flags);
14024  PrintPointerList(info.allocationCount, info.pAllocations);
14025  fprintf(m_File, ",");
14026  PrintPointerList(info.poolCount, info.pPools);
14027  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14028  info.maxCpuBytesToMove,
14029  info.maxCpuAllocationsToMove,
14030  info.maxGpuBytesToMove,
14031  info.maxGpuAllocationsToMove,
14032  info.commandBuffer,
14033  ctx);
14034  Flush();
14035 }
14036 
14037 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14038  VmaDefragmentationContext ctx)
14039 {
14040  CallParams callParams;
14041  GetBasicParams(callParams);
14042 
14043  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14044  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14045  ctx);
14046  Flush();
14047 }
14048 
14049 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14050 {
14051  if(pUserData != VMA_NULL)
14052  {
14053  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14054  {
14055  m_Str = (const char*)pUserData;
14056  }
14057  else
14058  {
14059  sprintf_s(m_PtrStr, "%p", pUserData);
14060  m_Str = m_PtrStr;
14061  }
14062  }
14063  else
14064  {
14065  m_Str = "";
14066  }
14067 }
14068 
14069 void VmaRecorder::WriteConfiguration(
14070  const VkPhysicalDeviceProperties& devProps,
14071  const VkPhysicalDeviceMemoryProperties& memProps,
14072  bool dedicatedAllocationExtensionEnabled)
14073 {
14074  fprintf(m_File, "Config,Begin\n");
14075 
14076  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14077  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14078  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14079  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14080  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14081  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14082 
14083  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14084  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14085  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14086 
14087  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14088  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14089  {
14090  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14091  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14092  }
14093  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14094  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14095  {
14096  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14097  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14098  }
14099 
14100  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14101 
14102  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14103  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14104  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14105  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14106  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14107  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14108  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14109  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14110  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14111 
14112  fprintf(m_File, "Config,End\n");
14113 }
14114 
14115 void VmaRecorder::GetBasicParams(CallParams& outParams)
14116 {
14117  outParams.threadId = GetCurrentThreadId();
14118 
14119  LARGE_INTEGER counter;
14120  QueryPerformanceCounter(&counter);
14121  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14122 }
14123 
14124 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14125 {
14126  if(count)
14127  {
14128  fprintf(m_File, "%p", pItems[0]);
14129  for(uint64_t i = 1; i < count; ++i)
14130  {
14131  fprintf(m_File, " %p", pItems[i]);
14132  }
14133  }
14134 }
14135 
14136 void VmaRecorder::Flush()
14137 {
14138  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14139  {
14140  fflush(m_File);
14141  }
14142 }
14143 
14144 #endif // #if VMA_RECORDING_ENABLED
14145 
14147 // VmaAllocationObjectAllocator
14148 
14149 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14150  m_Allocator(pAllocationCallbacks, 1024)
14151 {
14152 }
14153 
14154 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14155 {
14156  VmaMutexLock mutexLock(m_Mutex);
14157  return m_Allocator.Alloc();
14158 }
14159 
14160 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14161 {
14162  VmaMutexLock mutexLock(m_Mutex);
14163  m_Allocator.Free(hAlloc);
14164 }
14165 
14167 // VmaAllocator_T
14168 
14169 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14170  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14171  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14172  m_hDevice(pCreateInfo->device),
14173  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14174  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14175  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14176  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14177  m_PreferredLargeHeapBlockSize(0),
14178  m_PhysicalDevice(pCreateInfo->physicalDevice),
14179  m_CurrentFrameIndex(0),
14180  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14181  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14182  m_NextPoolId(0)
14183 #if VMA_RECORDING_ENABLED
14184  ,m_pRecorder(VMA_NULL)
14185 #endif
14186 {
14187  if(VMA_DEBUG_DETECT_CORRUPTION)
14188  {
14189  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14190  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14191  }
14192 
14193  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14194 
14195 #if !(VMA_DEDICATED_ALLOCATION)
14196  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14197  {
14198  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14199  }
14200 #endif
14201 
14202  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14203  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14204  memset(&m_MemProps, 0, sizeof(m_MemProps));
14205 
14206  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14207  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14208 
14209  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14210  {
14211  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14212  }
14213 
14214  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14215  {
14216  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14217  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14218  }
14219 
14220  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14221 
14222  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14223  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14224 
14225  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14226  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14227  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14228  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14229 
14230  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14231  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14232 
14233  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14234  {
14235  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14236  {
14237  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14238  if(limit != VK_WHOLE_SIZE)
14239  {
14240  m_HeapSizeLimit[heapIndex] = limit;
14241  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14242  {
14243  m_MemProps.memoryHeaps[heapIndex].size = limit;
14244  }
14245  }
14246  }
14247  }
14248 
14249  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14250  {
14251  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14252 
14253  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14254  this,
14255  VK_NULL_HANDLE, // hParentPool
14256  memTypeIndex,
14257  preferredBlockSize,
14258  0,
14259  SIZE_MAX,
14260  GetBufferImageGranularity(),
14261  pCreateInfo->frameInUseCount,
14262  false, // isCustomPool
14263  false, // explicitBlockSize
14264  false); // linearAlgorithm
14265  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14266  // because minBlockCount is 0.
14267  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14268 
14269  }
14270 }
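/*
Example of the pHeapSizeLimit handling above (a sketch; limiting heap 0 is
arbitrary, real code should pick the heap from the device's memory properties):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;        // VK_WHOLE_SIZE means "no limit".
    heapLimits[0] = 512ull * 1024 * 1024;     // Cap heap 0 at 512 MiB.

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... physicalDevice, device, etc. ...
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/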
14271 
14272 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14273 {
14274  VkResult res = VK_SUCCESS;
14275 
14276  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14277  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14278  {
14279 #if VMA_RECORDING_ENABLED
14280  m_pRecorder = vma_new(this, VmaRecorder)();
14281  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14282  if(res != VK_SUCCESS)
14283  {
14284  return res;
14285  }
14286  m_pRecorder->WriteConfiguration(
14287  m_PhysicalDeviceProperties,
14288  m_MemProps,
14289  m_UseKhrDedicatedAllocation);
14290  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14291 #else
14292  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14293  return VK_ERROR_FEATURE_NOT_PRESENT;
14294 #endif
14295  }
14296 
14297  return res;
14298 }
14299 
14300 VmaAllocator_T::~VmaAllocator_T()
14301 {
14302 #if VMA_RECORDING_ENABLED
14303  if(m_pRecorder != VMA_NULL)
14304  {
14305  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14306  vma_delete(this, m_pRecorder);
14307  }
14308 #endif
14309 
14310  VMA_ASSERT(m_Pools.empty());
14311 
14312  for(size_t i = GetMemoryTypeCount(); i--; )
14313  {
14314  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14315  {
14316  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14317  }
14318 
14319  vma_delete(this, m_pDedicatedAllocations[i]);
14320  vma_delete(this, m_pBlockVectors[i]);
14321  }
14322 }
14323 
14324 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14325 {
14326 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14327  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14328  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14329  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14330  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14331  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14332  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14333  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14334  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14335  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14336  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14337  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14338  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14339  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14340  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14341  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14342  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14343  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14344 #if VMA_DEDICATED_ALLOCATION
14345  if(m_UseKhrDedicatedAllocation)
14346  {
14347  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14348  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14349  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14350  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14351  }
14352 #endif // #if VMA_DEDICATED_ALLOCATION
14353 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14354 
14355 #define VMA_COPY_IF_NOT_NULL(funcName) \
14356  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14357 
14358  if(pVulkanFunctions != VMA_NULL)
14359  {
14360  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14361  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14362  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14363  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14364  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14365  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14366  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14367  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14368  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14369  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14370  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14371  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14372  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14373  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14374  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14375  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14376  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14377 #if VMA_DEDICATED_ALLOCATION
14378  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14379  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14380 #endif
14381  }
14382 
14383 #undef VMA_COPY_IF_NOT_NULL
14384 
14385  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14386  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14387  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14388  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14389  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14390  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14391  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14392  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14393  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14394  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14395  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14396  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14397  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14398  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14399  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14400  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14401  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14402  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14403  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14404 #if VMA_DEDICATED_ALLOCATION
14405  if(m_UseKhrDedicatedAllocation)
14406  {
14407  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14408  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14409  }
14410 #endif
14411 }
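/*
When VMA_STATIC_VULKAN_FUNCTIONS is defined to 0 (e.g. when function pointers
come from a loader such as volk), the pointers must be supplied through
VmaAllocatorCreateInfo::pVulkanFunctions; a partial sketch:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    // ... every remaining member checked by the asserts in ImportVulkanFunctions ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/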
14412 
14413 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14414 {
14415  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14416  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14417  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14418  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14419 }
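/*
Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE (1 GiB) and
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB): a 512 MiB heap counts as small, so
its preferred block size is 512 / 8 = 64 MiB; an 8 GiB heap counts as large and
uses the full 256 MiB.
*/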
14420 
14421 VkResult VmaAllocator_T::AllocateMemoryOfType(
14422  VkDeviceSize size,
14423  VkDeviceSize alignment,
14424  bool dedicatedAllocation,
14425  VkBuffer dedicatedBuffer,
14426  VkImage dedicatedImage,
14427  const VmaAllocationCreateInfo& createInfo,
14428  uint32_t memTypeIndex,
14429  VmaSuballocationType suballocType,
14430  size_t allocationCount,
14431  VmaAllocation* pAllocations)
14432 {
14433  VMA_ASSERT(pAllocations != VMA_NULL);
14434  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14435 
14436  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14437 
14438  // If memory type is not HOST_VISIBLE, disable MAPPED.
14439  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14440  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14441  {
14442  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14443  }
14444 
14445  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14446  VMA_ASSERT(blockVector);
14447 
14448  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14449  bool preferDedicatedMemory =
14450  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14451  dedicatedAllocation ||
14452  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14453  size > preferredBlockSize / 2;
14454 
14455  if(preferDedicatedMemory &&
14456  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14457  finalCreateInfo.pool == VK_NULL_HANDLE)
14458  {
14459  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14460  }
14461 
14462  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14463  {
14464  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14465  {
14466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14467  }
14468  else
14469  {
14470  return AllocateDedicatedMemory(
14471  size,
14472  suballocType,
14473  memTypeIndex,
14474  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14475  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14476  finalCreateInfo.pUserData,
14477  dedicatedBuffer,
14478  dedicatedImage,
14479  allocationCount,
14480  pAllocations);
14481  }
14482  }
14483  else
14484  {
14485  VkResult res = blockVector->Allocate(
14486  m_CurrentFrameIndex.load(),
14487  size,
14488  alignment,
14489  finalCreateInfo,
14490  suballocType,
14491  allocationCount,
14492  pAllocations);
14493  if(res == VK_SUCCESS)
14494  {
14495  return res;
14496  }
14497 
14498  // Try dedicated memory.
14499  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14500  {
14501  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14502  }
14503  else
14504  {
14505  res = AllocateDedicatedMemory(
14506  size,
14507  suballocType,
14508  memTypeIndex,
14509  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14510  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14511  finalCreateInfo.pUserData,
14512  dedicatedBuffer,
14513  dedicatedImage,
14514  allocationCount,
14515  pAllocations);
14516  if(res == VK_SUCCESS)
14517  {
14518  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14519  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14520  return VK_SUCCESS;
14521  }
14522  else
14523  {
14524  // Everything failed: Return error code.
14525  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14526  return res;
14527  }
14528  }
14529  }
14530 }
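/*
Observable effect of the heuristic above (a sketch): with the default 256 MiB
preferred block size, a 200 MiB buffer exceeds preferredBlockSize / 2 and gets
dedicated memory even without any flag; the flag forces it for any size:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
*/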
14531 
14532 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14533  VkDeviceSize size,
14534  VmaSuballocationType suballocType,
14535  uint32_t memTypeIndex,
14536  bool map,
14537  bool isUserDataString,
14538  void* pUserData,
14539  VkBuffer dedicatedBuffer,
14540  VkImage dedicatedImage,
14541  size_t allocationCount,
14542  VmaAllocation* pAllocations)
14543 {
14544  VMA_ASSERT(allocationCount > 0 && pAllocations);
14545 
14546  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14547  allocInfo.memoryTypeIndex = memTypeIndex;
14548  allocInfo.allocationSize = size;
14549 
14550 #if VMA_DEDICATED_ALLOCATION
14551  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14552  if(m_UseKhrDedicatedAllocation)
14553  {
14554  if(dedicatedBuffer != VK_NULL_HANDLE)
14555  {
14556  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14557  dedicatedAllocInfo.buffer = dedicatedBuffer;
14558  allocInfo.pNext = &dedicatedAllocInfo;
14559  }
14560  else if(dedicatedImage != VK_NULL_HANDLE)
14561  {
14562  dedicatedAllocInfo.image = dedicatedImage;
14563  allocInfo.pNext = &dedicatedAllocInfo;
14564  }
14565  }
14566 #endif // #if VMA_DEDICATED_ALLOCATION
14567 
14568  size_t allocIndex;
14569  VkResult res = VK_SUCCESS;
14570  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14571  {
14572  res = AllocateDedicatedMemoryPage(
14573  size,
14574  suballocType,
14575  memTypeIndex,
14576  allocInfo,
14577  map,
14578  isUserDataString,
14579  pUserData,
14580  pAllocations + allocIndex);
14581  if(res != VK_SUCCESS)
14582  {
14583  break;
14584  }
14585  }
14586 
14587  if(res == VK_SUCCESS)
14588  {
14589  // Register them in m_pDedicatedAllocations.
14590  {
14591  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14592  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14593  VMA_ASSERT(pDedicatedAllocations);
14594  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14595  {
14596  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14597  }
14598  }
14599 
14600  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14601  }
14602  else
14603  {
14604  // Free all already created allocations.
14605  while(allocIndex--)
14606  {
14607  VmaAllocation currAlloc = pAllocations[allocIndex];
14608  VkDeviceMemory hMemory = currAlloc->GetMemory();
14609 
14610  /*
14611  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14612  before vkFreeMemory.
14613 
14614  if(currAlloc->GetMappedData() != VMA_NULL)
14615  {
14616  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14617  }
14618  */
14619 
14620  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14621 
14622  currAlloc->SetUserData(this, VMA_NULL);
14623  currAlloc->Dtor();
14624  m_AllocationObjectAllocator.Free(currAlloc);
14625  }
14626 
14627  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14628  }
14629 
14630  return res;
14631 }
14632 
14633 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14634  VkDeviceSize size,
14635  VmaSuballocationType suballocType,
14636  uint32_t memTypeIndex,
14637  const VkMemoryAllocateInfo& allocInfo,
14638  bool map,
14639  bool isUserDataString,
14640  void* pUserData,
14641  VmaAllocation* pAllocation)
14642 {
14643  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14644  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14645  if(res < 0)
14646  {
14647  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14648  return res;
14649  }
14650 
14651  void* pMappedData = VMA_NULL;
14652  if(map)
14653  {
14654  res = (*m_VulkanFunctions.vkMapMemory)(
14655  m_hDevice,
14656  hMemory,
14657  0,
14658  VK_WHOLE_SIZE,
14659  0,
14660  &pMappedData);
14661  if(res < 0)
14662  {
14663  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14664  FreeVulkanMemory(memTypeIndex, size, hMemory);
14665  return res;
14666  }
14667  }
14668 
14669  *pAllocation = m_AllocationObjectAllocator.Allocate();
14670  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14671  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14672  (*pAllocation)->SetUserData(this, pUserData);
14673  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14674  {
14675  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14676  }
14677 
14678  return VK_SUCCESS;
14679 }
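/*
The 'map' parameter corresponds to VMA_ALLOCATION_CREATE_MAPPED_BIT; a minimal
sketch of a persistently mapped allocation ('bufCreateInfo', 'srcData' and
'srcSize' are assumed to exist; the chosen memory type must be HOST_VISIBLE):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    memcpy(allocInfo.pMappedData, srcData, srcSize); // stays mapped until freed
*/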
14680 
14681 void VmaAllocator_T::GetBufferMemoryRequirements(
14682  VkBuffer hBuffer,
14683  VkMemoryRequirements& memReq,
14684  bool& requiresDedicatedAllocation,
14685  bool& prefersDedicatedAllocation) const
14686 {
14687 #if VMA_DEDICATED_ALLOCATION
14688  if(m_UseKhrDedicatedAllocation)
14689  {
14690  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14691  memReqInfo.buffer = hBuffer;
14692 
14693  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14694 
14695  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14696  memReq2.pNext = &memDedicatedReq;
14697 
14698  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14699 
14700  memReq = memReq2.memoryRequirements;
14701  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14702  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14703  }
14704  else
14705 #endif // #if VMA_DEDICATED_ALLOCATION
14706  {
14707  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14708  requiresDedicatedAllocation = false;
14709  prefersDedicatedAllocation = false;
14710  }
14711 }
14712 
14713 void VmaAllocator_T::GetImageMemoryRequirements(
14714  VkImage hImage,
14715  VkMemoryRequirements& memReq,
14716  bool& requiresDedicatedAllocation,
14717  bool& prefersDedicatedAllocation) const
14718 {
14719 #if VMA_DEDICATED_ALLOCATION
14720  if(m_UseKhrDedicatedAllocation)
14721  {
14722  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14723  memReqInfo.image = hImage;
14724 
14725  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14726 
14727  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14728  memReq2.pNext = &memDedicatedReq;
14729 
14730  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14731 
14732  memReq = memReq2.memoryRequirements;
14733  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14734  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14735  }
14736  else
14737 #endif // #if VMA_DEDICATED_ALLOCATION
14738  {
14739  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14740  requiresDedicatedAllocation = false;
14741  prefersDedicatedAllocation = false;
14742  }
14743 }
14744 
14745 VkResult VmaAllocator_T::AllocateMemory(
14746  const VkMemoryRequirements& vkMemReq,
14747  bool requiresDedicatedAllocation,
14748  bool prefersDedicatedAllocation,
14749  VkBuffer dedicatedBuffer,
14750  VkImage dedicatedImage,
14751  const VmaAllocationCreateInfo& createInfo,
14752  VmaSuballocationType suballocType,
14753  size_t allocationCount,
14754  VmaAllocation* pAllocations)
14755 {
14756  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14757 
14758  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14759 
14760  if(vkMemReq.size == 0)
14761  {
14762  return VK_ERROR_VALIDATION_FAILED_EXT;
14763  }
14764  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14765  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14766  {
14767  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14768  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14769  }
14770  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14771  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14772  {
14773  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14774  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14775  }
14776  if(requiresDedicatedAllocation)
14777  {
14778  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14779  {
14780  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14781  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14782  }
14783  if(createInfo.pool != VK_NULL_HANDLE)
14784  {
14785  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14786  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14787  }
14788  }
14789  if((createInfo.pool != VK_NULL_HANDLE) &&
14790  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14791  {
14792  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14793  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14794  }
14795 
14796  if(createInfo.pool != VK_NULL_HANDLE)
14797  {
14798  const VkDeviceSize alignmentForPool = VMA_MAX(
14799  vkMemReq.alignment,
14800  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14801  return createInfo.pool->m_BlockVector.Allocate(
14802  m_CurrentFrameIndex.load(),
14803  vkMemReq.size,
14804  alignmentForPool,
14805  createInfo,
14806  suballocType,
14807  allocationCount,
14808  pAllocations);
14809  }
14810  else
14811  {
14812  // Bit mask of Vulkan memory types acceptable for this allocation.
14813  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14814  uint32_t memTypeIndex = UINT32_MAX;
14815  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14816  if(res == VK_SUCCESS)
14817  {
14818  VkDeviceSize alignmentForMemType = VMA_MAX(
14819  vkMemReq.alignment,
14820  GetMemoryTypeMinAlignment(memTypeIndex));
14821 
14822  res = AllocateMemoryOfType(
14823  vkMemReq.size,
14824  alignmentForMemType,
14825  requiresDedicatedAllocation || prefersDedicatedAllocation,
14826  dedicatedBuffer,
14827  dedicatedImage,
14828  createInfo,
14829  memTypeIndex,
14830  suballocType,
14831  allocationCount,
14832  pAllocations);
14833  // Succeeded on first try.
14834  if(res == VK_SUCCESS)
14835  {
14836  return res;
14837  }
14838  // Allocation from this memory type failed. Try other compatible memory types.
14839  else
14840  {
14841  for(;;)
14842  {
14843  // Remove old memTypeIndex from list of possibilities.
14844  memoryTypeBits &= ~(1u << memTypeIndex);
14845  // Find alternative memTypeIndex.
14846  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14847  if(res == VK_SUCCESS)
14848  {
14849  alignmentForMemType = VMA_MAX(
14850  vkMemReq.alignment,
14851  GetMemoryTypeMinAlignment(memTypeIndex));
14852 
14853  res = AllocateMemoryOfType(
14854  vkMemReq.size,
14855  alignmentForMemType,
14856  requiresDedicatedAllocation || prefersDedicatedAllocation,
14857  dedicatedBuffer,
14858  dedicatedImage,
14859  createInfo,
14860  memTypeIndex,
14861  suballocType,
14862  allocationCount,
14863  pAllocations);
14864  // Allocation from this alternative memory type succeeded.
14865  if(res == VK_SUCCESS)
14866  {
14867  return res;
14868  }
14869  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14870  }
14871  // No other matching memory type index could be found.
14872  else
14873  {
14874  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14875  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14876  }
14877  }
14878  }
14879  }
14880  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14881  else
14882  return res;
14883  }
14884 }
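/*
The fallback loop above mirrors what could be done manually with the public API
(a sketch; 'TryAllocate' is a hypothetical helper standing in for
AllocateMemoryOfType):

    uint32_t memoryTypeBits = memReq.memoryTypeBits; // from vkGet*MemoryRequirements
    uint32_t memTypeIndex;
    while(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS)
    {
        if(TryAllocate(memTypeIndex) == VK_SUCCESS)
            break;
        memoryTypeBits &= ~(1u << memTypeIndex); // exclude the failed type and retry
    }
*/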
14885 
14886 void VmaAllocator_T::FreeMemory(
14887  size_t allocationCount,
14888  const VmaAllocation* pAllocations)
14889 {
14890  VMA_ASSERT(pAllocations);
14891 
14892  for(size_t allocIndex = allocationCount; allocIndex--; )
14893  {
14894  VmaAllocation allocation = pAllocations[allocIndex];
14895 
14896  if(allocation != VK_NULL_HANDLE)
14897  {
14898  if(TouchAllocation(allocation))
14899  {
14900  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14901  {
14902  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14903  }
14904 
14905  switch(allocation->GetType())
14906  {
14907  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14908  {
14909  VmaBlockVector* pBlockVector = VMA_NULL;
14910  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14911  if(hPool != VK_NULL_HANDLE)
14912  {
14913  pBlockVector = &hPool->m_BlockVector;
14914  }
14915  else
14916  {
14917  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14918  pBlockVector = m_pBlockVectors[memTypeIndex];
14919  }
14920  pBlockVector->Free(allocation);
14921  }
14922  break;
14923  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14924  FreeDedicatedMemory(allocation);
14925  break;
14926  default:
14927  VMA_ASSERT(0);
14928  }
14929  }
14930 
14931  allocation->SetUserData(this, VMA_NULL);
14932  allocation->Dtor();
14933  m_AllocationObjectAllocator.Free(allocation);
14934  }
14935  }
14936 }
14937 
14938 VkResult VmaAllocator_T::ResizeAllocation(
14939  const VmaAllocation alloc,
14940  VkDeviceSize newSize)
14941 {
14942  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14943  {
14944  return VK_ERROR_VALIDATION_FAILED_EXT;
14945  }
14946  if(newSize == alloc->GetSize())
14947  {
14948  return VK_SUCCESS;
14949  }
14950 
14951  switch(alloc->GetType())
14952  {
14953  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14954  return VK_ERROR_FEATURE_NOT_PRESENT;
14955  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14956  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14957  {
14958  alloc->ChangeSize(newSize);
14959  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14960  return VK_SUCCESS;
14961  }
14962  else
14963  {
14964  return VK_ERROR_OUT_OF_POOL_MEMORY;
14965  }
14966  default:
14967  VMA_ASSERT(0);
14968  return VK_ERROR_VALIDATION_FAILED_EXT;
14969  }
14970 }
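/*
Example: resizing in place through the public API. A minimal sketch, assuming
`allocator` and a block-based allocation `alloc` (hypothetical names). As
ResizeAllocation above shows, only block allocations can be resized; dedicated
allocations report VK_ERROR_FEATURE_NOT_PRESENT.

    VkResult res = vmaResizeAllocation(allocator, alloc, newSize);
    if(res == VK_ERROR_OUT_OF_POOL_MEMORY)
    {
        // No room to grow in place - fall back to allocating a new
        // allocation of newSize and copying the contents.
    }
*/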
14971 
14972 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14973 {
14974  // Initialize.
14975  InitStatInfo(pStats->total);
14976  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14977  InitStatInfo(pStats->memoryType[i]);
14978  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14979  InitStatInfo(pStats->memoryHeap[i]);
14980 
14981  // Process default pools.
14982  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14983  {
14984  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14985  VMA_ASSERT(pBlockVector);
14986  pBlockVector->AddStats(pStats);
14987  }
14988 
14989  // Process custom pools.
14990  {
14991  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14992  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14993  {
14994  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14995  }
14996  }
14997 
14998  // Process dedicated allocations.
14999  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15000  {
15001  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15002  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15003  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15004  VMA_ASSERT(pDedicatedAllocVector);
15005  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15006  {
15007  VmaStatInfo allocationStatInfo;
15008  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15009  VmaAddStatInfo(pStats->total, allocationStatInfo);
15010  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15011  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15012  }
15013  }
15014 
15015  // Postprocess.
15016  VmaPostprocessCalcStatInfo(pStats->total);
15017  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15018  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15019  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15020  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15021 }
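/*
Example: reading the aggregated statistics. A minimal sketch, assuming an
initialized `allocator` (hypothetical name). VmaStats covers default pools,
custom pools and dedicated allocations, exactly as CalculateStats above sums
them.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B, unused: %llu B, allocations: %u\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes,
        stats.total.allocationCount);
*/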
15022 
15023 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15024 
15025 VkResult VmaAllocator_T::DefragmentationBegin(
15026  const VmaDefragmentationInfo2& info,
15027  VmaDefragmentationStats* pStats,
15028  VmaDefragmentationContext* pContext)
15029 {
15030  if(info.pAllocationsChanged != VMA_NULL)
15031  {
15032  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15033  }
15034 
15035  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15036  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15037 
15038  (*pContext)->AddPools(info.poolCount, info.pPools);
15039  (*pContext)->AddAllocations(
15040  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15041 
15042  VkResult res = (*pContext)->Defragment(
15043  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15044  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15045  info.commandBuffer, pStats);
15046 
15047  if(res != VK_NOT_READY)
15048  {
15049  vma_delete(this, *pContext);
15050  *pContext = VMA_NULL;
15051  }
15052 
15053  return res;
15054 }
15055 
15056 VkResult VmaAllocator_T::DefragmentationEnd(
15057  VmaDefragmentationContext context)
15058 {
15059  vma_delete(this, context);
15060  return VK_SUCCESS;
15061 }
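/*
Example: driving the begin/end pair directly. A minimal sketch, assuming
`allocator` plus arrays `allocs` and `changed` of length `count` (all
hypothetical names). As DefragmentationBegin above shows, VK_NOT_READY means
work is still pending (e.g. recorded into info.commandBuffer) and the context
must be released later with vmaDefragmentationEnd.

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = count;
    info.pAllocations = allocs;
    info.pAllocationsChanged = changed;     // Optional, one VkBool32 per allocation.
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, nullptr, &ctx);
    if(res == VK_NOT_READY)
    {
        // Submit and wait on info.commandBuffer if one was given, then:
        res = vmaDefragmentationEnd(allocator, ctx);
    }
*/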
15062 
15063 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15064 {
15065  if(hAllocation->CanBecomeLost())
15066  {
15067  /*
15068  Warning: This is a carefully designed algorithm.
15069  Do not modify unless you really know what you're doing :)
15070  */
15071  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15072  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15073  for(;;)
15074  {
15075  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15076  {
15077  pAllocationInfo->memoryType = UINT32_MAX;
15078  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15079  pAllocationInfo->offset = 0;
15080  pAllocationInfo->size = hAllocation->GetSize();
15081  pAllocationInfo->pMappedData = VMA_NULL;
15082  pAllocationInfo->pUserData = hAllocation->GetUserData();
15083  return;
15084  }
15085  else if(localLastUseFrameIndex == localCurrFrameIndex)
15086  {
15087  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15088  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15089  pAllocationInfo->offset = hAllocation->GetOffset();
15090  pAllocationInfo->size = hAllocation->GetSize();
15091  pAllocationInfo->pMappedData = VMA_NULL;
15092  pAllocationInfo->pUserData = hAllocation->GetUserData();
15093  return;
15094  }
15095  else // Last use time earlier than current time.
15096  {
15097  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15098  {
15099  localLastUseFrameIndex = localCurrFrameIndex;
15100  }
15101  }
15102  }
15103  }
15104  else
15105  {
15106 #if VMA_STATS_STRING_ENABLED
15107  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15108  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15109  for(;;)
15110  {
15111  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15112  if(localLastUseFrameIndex == localCurrFrameIndex)
15113  {
15114  break;
15115  }
15116  else // Last use time earlier than current time.
15117  {
15118  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15119  {
15120  localLastUseFrameIndex = localCurrFrameIndex;
15121  }
15122  }
15123  }
15124 #endif
15125 
15126  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15127  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15128  pAllocationInfo->offset = hAllocation->GetOffset();
15129  pAllocationInfo->size = hAllocation->GetSize();
15130  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15131  pAllocationInfo->pUserData = hAllocation->GetUserData();
15132  }
15133 }
15134 
15135 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15136 {
15137  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15138  if(hAllocation->CanBecomeLost())
15139  {
15140  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15141  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15142  for(;;)
15143  {
15144  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15145  {
15146  return false;
15147  }
15148  else if(localLastUseFrameIndex == localCurrFrameIndex)
15149  {
15150  return true;
15151  }
15152  else // Last use time earlier than current time.
15153  {
15154  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15155  {
15156  localLastUseFrameIndex = localCurrFrameIndex;
15157  }
15158  }
15159  }
15160  }
15161  else
15162  {
15163 #if VMA_STATS_STRING_ENABLED
15164  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15165  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15166  for(;;)
15167  {
15168  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15169  if(localLastUseFrameIndex == localCurrFrameIndex)
15170  {
15171  break;
15172  }
15173  else // Last use time earlier than current time.
15174  {
15175  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15176  {
15177  localLastUseFrameIndex = localCurrFrameIndex;
15178  }
15179  }
15180  }
15181 #endif
15182 
15183  return true;
15184  }
15185 }
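/*
Example: the per-frame pattern for allocations that can become lost. A minimal
sketch, assuming `allocator`, a frame counter `frameIndex` and an allocation
`alloc` created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (hypothetical
names). TouchAllocation above both queries and refreshes the last-use frame.

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation was lost - destroy the old resource and recreate it.
    }
*/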
15186 
15187 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15188 {
15189  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15190 
15191  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15192 
15193  if(newCreateInfo.maxBlockCount == 0)
15194  {
15195  newCreateInfo.maxBlockCount = SIZE_MAX;
15196  }
15197  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15198  {
15199  return VK_ERROR_INITIALIZATION_FAILED;
15200  }
15201 
15202  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15203 
15204  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15205 
15206  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15207  if(res != VK_SUCCESS)
15208  {
15209  vma_delete(this, *pPool);
15210  *pPool = VMA_NULL;
15211  return res;
15212  }
15213 
15214  // Add to m_Pools.
15215  {
15216  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15217  (*pPool)->SetId(m_NextPoolId++);
15218  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15219  }
15220 
15221  return VK_SUCCESS;
15222 }
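/*
Example: creating a custom pool. A minimal sketch, assuming `allocator` and a
`memTypeIndex` previously obtained from vmaFindMemoryTypeIndex (hypothetical
names). Note how CreatePool above normalizes maxBlockCount == 0 to "unlimited".

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.
    poolInfo.minBlockCount = 1;               // Keep one block alive.
    poolInfo.maxBlockCount = 0;               // Unlimited.

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // Use the pool via VmaAllocationCreateInfo::pool, and finally:
    // vmaDestroyPool(allocator, pool);
*/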
15223 
15224 void VmaAllocator_T::DestroyPool(VmaPool pool)
15225 {
15226  // Remove from m_Pools.
15227  {
15228  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15229  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15230  VMA_ASSERT(success && "Pool not found in Allocator.");
15231  }
15232 
15233  vma_delete(this, pool);
15234 }
15235 
15236 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15237 {
15238  pool->m_BlockVector.GetPoolStats(pPoolStats);
15239 }
15240 
15241 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15242 {
15243  m_CurrentFrameIndex.store(frameIndex);
15244 }
15245 
15246 void VmaAllocator_T::MakePoolAllocationsLost(
15247  VmaPool hPool,
15248  size_t* pLostAllocationCount)
15249 {
15250  hPool->m_BlockVector.MakePoolAllocationsLost(
15251  m_CurrentFrameIndex.load(),
15252  pLostAllocationCount);
15253 }
15254 
15255 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15256 {
15257  return hPool->m_BlockVector.CheckCorruption();
15258 }
15259 
15260 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15261 {
15262  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15263 
15264  // Process default pools.
15265  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15266  {
15267  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15268  {
15269  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15270  VMA_ASSERT(pBlockVector);
15271  VkResult localRes = pBlockVector->CheckCorruption();
15272  switch(localRes)
15273  {
15274  case VK_ERROR_FEATURE_NOT_PRESENT:
15275  break;
15276  case VK_SUCCESS:
15277  finalRes = VK_SUCCESS;
15278  break;
15279  default:
15280  return localRes;
15281  }
15282  }
15283  }
15284 
15285  // Process custom pools.
15286  {
15287  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15288  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15289  {
15290  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15291  {
15292  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15293  switch(localRes)
15294  {
15295  case VK_ERROR_FEATURE_NOT_PRESENT:
15296  break;
15297  case VK_SUCCESS:
15298  finalRes = VK_SUCCESS;
15299  break;
15300  default:
15301  return localRes;
15302  }
15303  }
15304  }
15305  }
15306 
15307  return finalRes;
15308 }
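/*
Example: validating corruption-detection margins. A minimal sketch, assuming
`allocator` was built with the compile-time macros VMA_DEBUG_MARGIN and
VMA_DEBUG_DETECT_CORRUPTION enabled. As CheckCorruption above shows,
VK_ERROR_FEATURE_NOT_PRESENT means no memory type in the mask had anything to
check; any other error indicates detected corruption.

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // All memory types.
    if(res != VK_SUCCESS && res != VK_ERROR_FEATURE_NOT_PRESENT)
    {
        // A memory corruption (e.g. write past the end of an allocation)
        // was detected in one of the margins.
    }
*/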
15309 
15310 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15311 {
15312  *pAllocation = m_AllocationObjectAllocator.Allocate();
15313  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15314  (*pAllocation)->InitLost();
15315 }
15316 
15317 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15318 {
15319  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15320 
15321  VkResult res;
15322  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15323  {
15324  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15325  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15326  {
15327  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15328  if(res == VK_SUCCESS)
15329  {
15330  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15331  }
15332  }
15333  else
15334  {
15335  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15336  }
15337  }
15338  else
15339  {
15340  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15341  }
15342 
15343  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15344  {
15345  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15346  }
15347 
15348  return res;
15349 }
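/*
Example: where m_HeapSizeLimit comes from. A minimal sketch of allocator
creation with a per-heap budget, assuming `physicalDevice` and `device`
(hypothetical handles). Heaps left at VK_WHOLE_SIZE are unlimited, which is
why AllocateVulkanMemory above skips the bookkeeping for them.

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;        // Unlimited by default.
    heapLimits[0] = 256ull * 1024 * 1024;     // Cap heap 0 at 256 MiB.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;

    VmaAllocator allocator = VK_NULL_HANDLE;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/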
15350 
15351 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15352 {
15353  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15354  {
15355  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15356  }
15357 
15358  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15359 
15360  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15361  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15362  {
15363  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15364  m_HeapSizeLimit[heapIndex] += size;
15365  }
15366 }
15367 
15368 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15369 {
15370  if(hAllocation->CanBecomeLost())
15371  {
15372  return VK_ERROR_MEMORY_MAP_FAILED;
15373  }
15374 
15375  switch(hAllocation->GetType())
15376  {
15377  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15378  {
15379  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15380  char *pBytes = VMA_NULL;
15381  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15382  if(res == VK_SUCCESS)
15383  {
15384  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15385  hAllocation->BlockAllocMap();
15386  }
15387  return res;
15388  }
15389  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15390  return hAllocation->DedicatedAllocMap(this, ppData);
15391  default:
15392  VMA_ASSERT(0);
15393  return VK_ERROR_MEMORY_MAP_FAILED;
15394  }
15395 }
15396 
15397 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15398 {
15399  switch(hAllocation->GetType())
15400  {
15401  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15402  {
15403  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15404  hAllocation->BlockAllocUnmap();
15405  pBlock->Unmap(this, 1);
15406  }
15407  break;
15408  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15409  hAllocation->DedicatedAllocUnmap(this);
15410  break;
15411  default:
15412  VMA_ASSERT(0);
15413  }
15414 }
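/*
Example: mapping a host-visible allocation. A minimal sketch, assuming
`allocator`, a HOST_VISIBLE allocation `alloc`, and source data
`srcData`/`srcSize` (all hypothetical names). Map/Unmap above are
reference-counted per block, so nested mappings are fine as long as the calls
are balanced.

    void* pData = nullptr;
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/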
15415 
15416 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15417 {
15418  VkResult res = VK_SUCCESS;
15419  switch(hAllocation->GetType())
15420  {
15421  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15422  res = GetVulkanFunctions().vkBindBufferMemory(
15423  m_hDevice,
15424  hBuffer,
15425  hAllocation->GetMemory(),
15426  0); //memoryOffset
15427  break;
15428  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15429  {
15430  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15431  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15432  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15433  break;
15434  }
15435  default:
15436  VMA_ASSERT(0);
15437  }
15438  return res;
15439 }
15440 
15441 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15442 {
15443  VkResult res = VK_SUCCESS;
15444  switch(hAllocation->GetType())
15445  {
15446  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15447  res = GetVulkanFunctions().vkBindImageMemory(
15448  m_hDevice,
15449  hImage,
15450  hAllocation->GetMemory(),
15451  0); //memoryOffset
15452  break;
15453  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15454  {
15455  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15456  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15457  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15458  break;
15459  }
15460  default:
15461  VMA_ASSERT(0);
15462  }
15463  return res;
15464 }
15465 
15466 void VmaAllocator_T::FlushOrInvalidateAllocation(
15467  VmaAllocation hAllocation,
15468  VkDeviceSize offset, VkDeviceSize size,
15469  VMA_CACHE_OPERATION op)
15470 {
15471  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15472  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15473  {
15474  const VkDeviceSize allocationSize = hAllocation->GetSize();
15475  VMA_ASSERT(offset <= allocationSize);
15476 
15477  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15478 
15479  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15480  memRange.memory = hAllocation->GetMemory();
15481 
15482  switch(hAllocation->GetType())
15483  {
15484  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15485  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15486  if(size == VK_WHOLE_SIZE)
15487  {
15488  memRange.size = allocationSize - memRange.offset;
15489  }
15490  else
15491  {
15492  VMA_ASSERT(offset + size <= allocationSize);
15493  memRange.size = VMA_MIN(
15494  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15495  allocationSize - memRange.offset);
15496  }
15497  break;
15498 
15499  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15500  {
15501  // 1. Still within this allocation.
15502  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15503  if(size == VK_WHOLE_SIZE)
15504  {
15505  size = allocationSize - offset;
15506  }
15507  else
15508  {
15509  VMA_ASSERT(offset + size <= allocationSize);
15510  }
15511  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15512 
15513  // 2. Adjust to whole block.
15514  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15515  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15516  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15517  memRange.offset += allocationOffset;
15518  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15519 
15520  break;
15521  }
15522 
15523  default:
15524  VMA_ASSERT(0);
15525  }
15526 
15527  switch(op)
15528  {
15529  case VMA_CACHE_FLUSH:
15530  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15531  break;
15532  case VMA_CACHE_INVALIDATE:
15533  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15534  break;
15535  default:
15536  VMA_ASSERT(0);
15537  }
15538  }
15539  // else: Just ignore this call.
15540 }
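/*
Example: synchronizing non-coherent memory. A minimal sketch, assuming a
HOST_VISIBLE but not HOST_COHERENT allocation `alloc` (hypothetical name).
Offset and size are relative to the allocation; the rounding to
nonCoherentAtomSize and the clamping to the owning block happen inside
FlushOrInvalidateAllocation above, so callers don't need to align anything.

    // After writing through the mapped pointer:
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
    // Before reading data the GPU has written:
    vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/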
15541 
15542 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15543 {
15544  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15545 
15546  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15547  {
15548  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15549  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15550  VMA_ASSERT(pDedicatedAllocations);
15551  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15552  VMA_ASSERT(success);
15553  }
15554 
15555  VkDeviceMemory hMemory = allocation->GetMemory();
15556 
15557  /*
15558  There is no need to call this, because the Vulkan spec allows skipping
15559  vkUnmapMemory before vkFreeMemory.
15560 
15561  if(allocation->GetMappedData() != VMA_NULL)
15562  {
15563  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15564  }
15565  */
15566 
15567  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15568 
15569  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15570 }
15571 
15572 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15573 {
15574  VkBufferCreateInfo dummyBufCreateInfo;
15575  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15576 
15577  uint32_t memoryTypeBits = 0;
15578 
15579  // Create buffer.
15580  VkBuffer buf = VMA_NULL;
15581  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15582  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15583  if(res == VK_SUCCESS)
15584  {
15585  // Query for supported memory types.
15586  VkMemoryRequirements memReq;
15587  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15588  memoryTypeBits = memReq.memoryTypeBits;
15589 
15590  // Destroy buffer.
15591  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15592  }
15593 
15594  return memoryTypeBits;
15595 }
15596 
15597 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15598 {
15599  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15600  !hAllocation->CanBecomeLost() &&
15601  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15602  {
15603  void* pData = VMA_NULL;
15604  VkResult res = Map(hAllocation, &pData);
15605  if(res == VK_SUCCESS)
15606  {
15607  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15608  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15609  Unmap(hAllocation);
15610  }
15611  else
15612  {
15613  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15614  }
15615  }
15616 }
15617 
15618 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
15619 {
15620  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
15621  if(memoryTypeBits == UINT32_MAX)
15622  {
15623  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
15624  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
15625  }
15626  return memoryTypeBits;
15627 }
15628 
15629 #if VMA_STATS_STRING_ENABLED
15630 
15631 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15632 {
15633  bool dedicatedAllocationsStarted = false;
15634  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15635  {
15636  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15637  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15638  VMA_ASSERT(pDedicatedAllocVector);
15639  if(pDedicatedAllocVector->empty() == false)
15640  {
15641  if(dedicatedAllocationsStarted == false)
15642  {
15643  dedicatedAllocationsStarted = true;
15644  json.WriteString("DedicatedAllocations");
15645  json.BeginObject();
15646  }
15647 
15648  json.BeginString("Type ");
15649  json.ContinueString(memTypeIndex);
15650  json.EndString();
15651 
15652  json.BeginArray();
15653 
15654  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15655  {
15656  json.BeginObject(true);
15657  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15658  hAlloc->PrintParameters(json);
15659  json.EndObject();
15660  }
15661 
15662  json.EndArray();
15663  }
15664  }
15665  if(dedicatedAllocationsStarted)
15666  {
15667  json.EndObject();
15668  }
15669 
15670  {
15671  bool allocationsStarted = false;
15672  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15673  {
15674  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15675  {
15676  if(allocationsStarted == false)
15677  {
15678  allocationsStarted = true;
15679  json.WriteString("DefaultPools");
15680  json.BeginObject();
15681  }
15682 
15683  json.BeginString("Type ");
15684  json.ContinueString(memTypeIndex);
15685  json.EndString();
15686 
15687  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15688  }
15689  }
15690  if(allocationsStarted)
15691  {
15692  json.EndObject();
15693  }
15694  }
15695 
15696  // Custom pools
15697  {
15698  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15699  const size_t poolCount = m_Pools.size();
15700  if(poolCount > 0)
15701  {
15702  json.WriteString("Pools");
15703  json.BeginObject();
15704  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15705  {
15706  json.BeginString();
15707  json.ContinueString(m_Pools[poolIndex]->GetId());
15708  json.EndString();
15709 
15710  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15711  }
15712  json.EndObject();
15713  }
15714  }
15715 }
15716 
15717 #endif // #if VMA_STATS_STRING_ENABLED
15718 
15719 //////////////////////////////////////////////////////////////////////////////
15720 // Public interface
15721 
15722 VkResult vmaCreateAllocator(
15723  const VmaAllocatorCreateInfo* pCreateInfo,
15724  VmaAllocator* pAllocator)
15725 {
15726  VMA_ASSERT(pCreateInfo && pAllocator);
15727  VMA_DEBUG_LOG("vmaCreateAllocator");
15728  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15729  return (*pAllocator)->Init(pCreateInfo);
15730 }
15731 
15732 void vmaDestroyAllocator(
15733  VmaAllocator allocator)
15734 {
15735  if(allocator != VK_NULL_HANDLE)
15736  {
15737  VMA_DEBUG_LOG("vmaDestroyAllocator");
15738  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15739  vma_delete(&allocationCallbacks, allocator);
15740  }
15741 }
15742 
15743 void vmaGetPhysicalDeviceProperties(
15744  VmaAllocator allocator,
15745  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15746 {
15747  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15748  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15749 }
15750 
15751 void vmaGetMemoryProperties(
15752  VmaAllocator allocator,
15753  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15754 {
15755  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15756  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15757 }
15758 
15759 void vmaGetMemoryTypeProperties(
15760  VmaAllocator allocator,
15761  uint32_t memoryTypeIndex,
15762  VkMemoryPropertyFlags* pFlags)
15763 {
15764  VMA_ASSERT(allocator && pFlags);
15765  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15766  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15767 }
15768 
15769 void vmaSetCurrentFrameIndex(
15770  VmaAllocator allocator,
15771  uint32_t frameIndex)
15772 {
15773  VMA_ASSERT(allocator);
15774  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15775 
15776  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15777 
15778  allocator->SetCurrentFrameIndex(frameIndex);
15779 }
15780 
15781 void vmaCalculateStats(
15782  VmaAllocator allocator,
15783  VmaStats* pStats)
15784 {
15785  VMA_ASSERT(allocator && pStats);
15786  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15787  allocator->CalculateStats(pStats);
15788 }
15789 
15790 #if VMA_STATS_STRING_ENABLED
15791 
15792 void vmaBuildStatsString(
15793  VmaAllocator allocator,
15794  char** ppStatsString,
15795  VkBool32 detailedMap)
15796 {
15797  VMA_ASSERT(allocator && ppStatsString);
15798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15799 
15800  VmaStringBuilder sb(allocator);
15801  {
15802  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15803  json.BeginObject();
15804 
15805  VmaStats stats;
15806  allocator->CalculateStats(&stats);
15807 
15808  json.WriteString("Total");
15809  VmaPrintStatInfo(json, stats.total);
15810 
15811  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15812  {
15813  json.BeginString("Heap ");
15814  json.ContinueString(heapIndex);
15815  json.EndString();
15816  json.BeginObject();
15817 
15818  json.WriteString("Size");
15819  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15820 
15821  json.WriteString("Flags");
15822  json.BeginArray(true);
15823  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15824  {
15825  json.WriteString("DEVICE_LOCAL");
15826  }
15827  json.EndArray();
15828 
15829  if(stats.memoryHeap[heapIndex].blockCount > 0)
15830  {
15831  json.WriteString("Stats");
15832  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15833  }
15834 
15835  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15836  {
15837  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15838  {
15839  json.BeginString("Type ");
15840  json.ContinueString(typeIndex);
15841  json.EndString();
15842 
15843  json.BeginObject();
15844 
15845  json.WriteString("Flags");
15846  json.BeginArray(true);
15847  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15848  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15849  {
15850  json.WriteString("DEVICE_LOCAL");
15851  }
15852  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15853  {
15854  json.WriteString("HOST_VISIBLE");
15855  }
15856  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15857  {
15858  json.WriteString("HOST_COHERENT");
15859  }
15860  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15861  {
15862  json.WriteString("HOST_CACHED");
15863  }
15864  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15865  {
15866  json.WriteString("LAZILY_ALLOCATED");
15867  }
15868  json.EndArray();
15869 
15870  if(stats.memoryType[typeIndex].blockCount > 0)
15871  {
15872  json.WriteString("Stats");
15873  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15874  }
15875 
15876  json.EndObject();
15877  }
15878  }
15879 
15880  json.EndObject();
15881  }
15882  if(detailedMap == VK_TRUE)
15883  {
15884  allocator->PrintDetailedMap(json);
15885  }
15886 
15887  json.EndObject();
15888  }
15889 
15890  const size_t len = sb.GetLength();
15891  char* const pChars = vma_new_array(allocator, char, len + 1);
15892  if(len > 0)
15893  {
15894  memcpy(pChars, sb.GetData(), len);
15895  }
15896  pChars[len] = '\0';
15897  *ppStatsString = pChars;
15898 }
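/*
Example: dumping the JSON built above. A minimal sketch, assuming an
initialized `allocator` (hypothetical name). The returned string must be
released with vmaFreeStatsString using the same allocator.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/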
15899 
15900 void vmaFreeStatsString(
15901  VmaAllocator allocator,
15902  char* pStatsString)
15903 {
15904  if(pStatsString != VMA_NULL)
15905  {
15906  VMA_ASSERT(allocator);
15907  size_t len = strlen(pStatsString);
15908  vma_delete_array(allocator, pStatsString, len + 1);
15909  }
15910 }
15911 
15912 #endif // #if VMA_STATS_STRING_ENABLED
15913 
15914 /*
15915 This function is not protected by any mutex because it just reads immutable data.
15916 */
15917 VkResult vmaFindMemoryTypeIndex(
15918  VmaAllocator allocator,
15919  uint32_t memoryTypeBits,
15920  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15921  uint32_t* pMemoryTypeIndex)
15922 {
15923  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15924  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15925  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15926 
15927  if(pAllocationCreateInfo->memoryTypeBits != 0)
15928  {
15929  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15930  }
15931 
15932  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15933  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15934 
15935  // Convert usage to requiredFlags and preferredFlags.
15936  switch(pAllocationCreateInfo->usage)
15937  {
15938  case VMA_MEMORY_USAGE_UNKNOWN:
15939  break;
15940  case VMA_MEMORY_USAGE_GPU_ONLY:
15941  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15942  {
15943  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15944  }
15945  break;
15946  case VMA_MEMORY_USAGE_CPU_ONLY:
15947  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15948  break;
15949  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15950  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15951  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15952  {
15953  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15954  }
15955  break;
15956  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15957  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15958  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15959  break;
15960  default:
15961  break;
15962  }
15963 
15964  *pMemoryTypeIndex = UINT32_MAX;
15965  uint32_t minCost = UINT32_MAX;
15966  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15967  memTypeIndex < allocator->GetMemoryTypeCount();
15968  ++memTypeIndex, memTypeBit <<= 1)
15969  {
15970  // This memory type is acceptable according to memoryTypeBits bitmask.
15971  if((memTypeBit & memoryTypeBits) != 0)
15972  {
15973  const VkMemoryPropertyFlags currFlags =
15974  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15975  // This memory type contains requiredFlags.
15976  if((requiredFlags & ~currFlags) == 0)
15977  {
15978  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15979  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15980  // Remember memory type with lowest cost.
15981  if(currCost < minCost)
15982  {
15983  *pMemoryTypeIndex = memTypeIndex;
15984  if(currCost == 0)
15985  {
15986  return VK_SUCCESS;
15987  }
15988  minCost = currCost;
15989  }
15990  }
15991  }
15992  }
15993  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15994 }
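/*
Example: picking a memory type for a staging buffer. A minimal sketch,
assuming `allocator` and a `memoryTypeBits` mask taken from
vkGetBufferMemoryRequirements (hypothetical names). The usage value is
translated into required/preferred flags exactly as in the switch above.

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, &memTypeIndex);
    // VK_ERROR_FEATURE_NOT_PRESENT: no acceptable type satisfies requiredFlags.
*/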
15995 
15996 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15997  VmaAllocator allocator,
15998  const VkBufferCreateInfo* pBufferCreateInfo,
15999  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16000  uint32_t* pMemoryTypeIndex)
16001 {
16002  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16003  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16004  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16005  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16006 
16007  const VkDevice hDev = allocator->m_hDevice;
16008  VkBuffer hBuffer = VK_NULL_HANDLE;
16009  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16010  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16011  if(res == VK_SUCCESS)
16012  {
16013  VkMemoryRequirements memReq = {};
16014  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16015  hDev, hBuffer, &memReq);
16016 
16017  res = vmaFindMemoryTypeIndex(
16018  allocator,
16019  memReq.memoryTypeBits,
16020  pAllocationCreateInfo,
16021  pMemoryTypeIndex);
16022 
16023  allocator->GetVulkanFunctions().vkDestroyBuffer(
16024  hDev, hBuffer, allocator->GetAllocationCallbacks());
16025  }
16026  return res;
16027 }
16028 
16029 VkResult vmaFindMemoryTypeIndexForImageInfo(
16030  VmaAllocator allocator,
16031  const VkImageCreateInfo* pImageCreateInfo,
16032  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16033  uint32_t* pMemoryTypeIndex)
16034 {
16035  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16036  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16037  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16038  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16039 
16040  const VkDevice hDev = allocator->m_hDevice;
16041  VkImage hImage = VK_NULL_HANDLE;
16042  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16043  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16044  if(res == VK_SUCCESS)
16045  {
16046  VkMemoryRequirements memReq = {};
16047  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16048  hDev, hImage, &memReq);
16049 
16050  res = vmaFindMemoryTypeIndex(
16051  allocator,
16052  memReq.memoryTypeBits,
16053  pAllocationCreateInfo,
16054  pMemoryTypeIndex);
16055 
16056  allocator->GetVulkanFunctions().vkDestroyImage(
16057  hDev, hImage, allocator->GetAllocationCallbacks());
16058  }
16059  return res;
16060 }
16061 
16062 VkResult vmaCreatePool(
16063  VmaAllocator allocator,
16064  const VmaPoolCreateInfo* pCreateInfo,
16065  VmaPool* pPool)
16066 {
16067  VMA_ASSERT(allocator && pCreateInfo && pPool);
16068 
16069  VMA_DEBUG_LOG("vmaCreatePool");
16070 
16071  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16072 
16073  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16074 
16075 #if VMA_RECORDING_ENABLED
16076  if(allocator->GetRecorder() != VMA_NULL)
16077  {
16078  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16079  }
16080 #endif
16081 
16082  return res;
16083 }
16084 
16085 void vmaDestroyPool(
16086  VmaAllocator allocator,
16087  VmaPool pool)
16088 {
16089  VMA_ASSERT(allocator);
16090 
16091  if(pool == VK_NULL_HANDLE)
16092  {
16093  return;
16094  }
16095 
16096  VMA_DEBUG_LOG("vmaDestroyPool");
16097 
16098  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16099 
16100 #if VMA_RECORDING_ENABLED
16101  if(allocator->GetRecorder() != VMA_NULL)
16102  {
16103  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16104  }
16105 #endif
16106 
16107  allocator->DestroyPool(pool);
16108 }
16109 
16110 void vmaGetPoolStats(
16111  VmaAllocator allocator,
16112  VmaPool pool,
16113  VmaPoolStats* pPoolStats)
16114 {
16115  VMA_ASSERT(allocator && pool && pPoolStats);
16116 
16117  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16118 
16119  allocator->GetPoolStats(pool, pPoolStats);
16120 }
16121 
16122 void vmaMakePoolAllocationsLost(
16123  VmaAllocator allocator,
16124  VmaPool pool,
16125  size_t* pLostAllocationCount)
16126 {
16127  VMA_ASSERT(allocator && pool);
16128 
16129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16130 
16131 #if VMA_RECORDING_ENABLED
16132  if(allocator->GetRecorder() != VMA_NULL)
16133  {
16134  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16135  }
16136 #endif
16137 
16138  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16139 }
16140 
16141 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16142 {
16143  VMA_ASSERT(allocator && pool);
16144 
16145  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16146 
16147  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16148 
16149  return allocator->CheckPoolCorruption(pool);
16150 }
16151 
16152 VkResult vmaAllocateMemory(
16153  VmaAllocator allocator,
16154  const VkMemoryRequirements* pVkMemoryRequirements,
16155  const VmaAllocationCreateInfo* pCreateInfo,
16156  VmaAllocation* pAllocation,
16157  VmaAllocationInfo* pAllocationInfo)
16158 {
16159  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16160 
16161  VMA_DEBUG_LOG("vmaAllocateMemory");
16162 
16163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16164 
16165  VkResult result = allocator->AllocateMemory(
16166  *pVkMemoryRequirements,
16167  false, // requiresDedicatedAllocation
16168  false, // prefersDedicatedAllocation
16169  VK_NULL_HANDLE, // dedicatedBuffer
16170  VK_NULL_HANDLE, // dedicatedImage
16171  *pCreateInfo,
16172  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16173  1, // allocationCount
16174  pAllocation);
16175 
16176 #if VMA_RECORDING_ENABLED
16177  if(allocator->GetRecorder() != VMA_NULL)
16178  {
16179  allocator->GetRecorder()->RecordAllocateMemory(
16180  allocator->GetCurrentFrameIndex(),
16181  *pVkMemoryRequirements,
16182  *pCreateInfo,
16183  *pAllocation);
16184  }
16185 #endif
16186 
16187  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16188  {
16189  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16190  }
16191 
16192  return result;
16193 }
16194 
16195 VkResult vmaAllocateMemoryPages(
16196  VmaAllocator allocator,
16197  const VkMemoryRequirements* pVkMemoryRequirements,
16198  const VmaAllocationCreateInfo* pCreateInfo,
16199  size_t allocationCount,
16200  VmaAllocation* pAllocations,
16201  VmaAllocationInfo* pAllocationInfo)
16202 {
16203  if(allocationCount == 0)
16204  {
16205  return VK_SUCCESS;
16206  }
16207 
16208  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16209 
16210  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16211 
16212  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16213 
16214  VkResult result = allocator->AllocateMemory(
16215  *pVkMemoryRequirements,
16216  false, // requiresDedicatedAllocation
16217  false, // prefersDedicatedAllocation
16218  VK_NULL_HANDLE, // dedicatedBuffer
16219  VK_NULL_HANDLE, // dedicatedImage
16220  *pCreateInfo,
16221  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16222  allocationCount,
16223  pAllocations);
16224 
16225 #if VMA_RECORDING_ENABLED
16226  if(allocator->GetRecorder() != VMA_NULL)
16227  {
16228  allocator->GetRecorder()->RecordAllocateMemoryPages(
16229  allocator->GetCurrentFrameIndex(),
16230  *pVkMemoryRequirements,
16231  *pCreateInfo,
16232  (uint64_t)allocationCount,
16233  pAllocations);
16234  }
16235 #endif
16236 
16237  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16238  {
16239  for(size_t i = 0; i < allocationCount; ++i)
16240  {
16241  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16242  }
16243  }
16244 
16245  return result;
16246 }
16247 
16248 VkResult vmaAllocateMemoryForBuffer(
16249  VmaAllocator allocator,
16250  VkBuffer buffer,
16251  const VmaAllocationCreateInfo* pCreateInfo,
16252  VmaAllocation* pAllocation,
16253  VmaAllocationInfo* pAllocationInfo)
16254 {
16255  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16256 
16257  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16258 
16259  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16260 
16261  VkMemoryRequirements vkMemReq = {};
16262  bool requiresDedicatedAllocation = false;
16263  bool prefersDedicatedAllocation = false;
16264  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16265  requiresDedicatedAllocation,
16266  prefersDedicatedAllocation);
16267 
16268  VkResult result = allocator->AllocateMemory(
16269  vkMemReq,
16270  requiresDedicatedAllocation,
16271  prefersDedicatedAllocation,
16272  buffer, // dedicatedBuffer
16273  VK_NULL_HANDLE, // dedicatedImage
16274  *pCreateInfo,
16275  VMA_SUBALLOCATION_TYPE_BUFFER,
16276  1, // allocationCount
16277  pAllocation);
16278 
16279 #if VMA_RECORDING_ENABLED
16280  if(allocator->GetRecorder() != VMA_NULL)
16281  {
16282  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16283  allocator->GetCurrentFrameIndex(),
16284  vkMemReq,
16285  requiresDedicatedAllocation,
16286  prefersDedicatedAllocation,
16287  *pCreateInfo,
16288  *pAllocation);
16289  }
16290 #endif
16291 
16292  if(pAllocationInfo && result == VK_SUCCESS)
16293  {
16294  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16295  }
16296 
16297  return result;
16298 }
16299 
16300 VkResult vmaAllocateMemoryForImage(
16301  VmaAllocator allocator,
16302  VkImage image,
16303  const VmaAllocationCreateInfo* pCreateInfo,
16304  VmaAllocation* pAllocation,
16305  VmaAllocationInfo* pAllocationInfo)
16306 {
16307  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16308 
16309  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16310 
16311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16312 
16313  VkMemoryRequirements vkMemReq = {};
16314  bool requiresDedicatedAllocation = false;
16315  bool prefersDedicatedAllocation = false;
16316  allocator->GetImageMemoryRequirements(image, vkMemReq,
16317  requiresDedicatedAllocation, prefersDedicatedAllocation);
16318 
16319  VkResult result = allocator->AllocateMemory(
16320  vkMemReq,
16321  requiresDedicatedAllocation,
16322  prefersDedicatedAllocation,
16323  VK_NULL_HANDLE, // dedicatedBuffer
16324  image, // dedicatedImage
16325  *pCreateInfo,
16326  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16327  1, // allocationCount
16328  pAllocation);
16329 
16330 #if VMA_RECORDING_ENABLED
16331  if(allocator->GetRecorder() != VMA_NULL)
16332  {
16333  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16334  allocator->GetCurrentFrameIndex(),
16335  vkMemReq,
16336  requiresDedicatedAllocation,
16337  prefersDedicatedAllocation,
16338  *pCreateInfo,
16339  *pAllocation);
16340  }
16341 #endif
16342 
16343  if(pAllocationInfo && result == VK_SUCCESS)
16344  {
16345  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16346  }
16347 
16348  return result;
16349 }
16350 
16351 void vmaFreeMemory(
16352  VmaAllocator allocator,
16353  VmaAllocation allocation)
16354 {
16355  VMA_ASSERT(allocator);
16356 
16357  if(allocation == VK_NULL_HANDLE)
16358  {
16359  return;
16360  }
16361 
16362  VMA_DEBUG_LOG("vmaFreeMemory");
16363 
16364  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16365 
16366 #if VMA_RECORDING_ENABLED
16367  if(allocator->GetRecorder() != VMA_NULL)
16368  {
16369  allocator->GetRecorder()->RecordFreeMemory(
16370  allocator->GetCurrentFrameIndex(),
16371  allocation);
16372  }
16373 #endif
16374 
16375  allocator->FreeMemory(
16376  1, // allocationCount
16377  &allocation);
16378 }
16379 
16380 void vmaFreeMemoryPages(
16381  VmaAllocator allocator,
16382  size_t allocationCount,
16383  VmaAllocation* pAllocations)
16384 {
16385  if(allocationCount == 0)
16386  {
16387  return;
16388  }
16389 
16390  VMA_ASSERT(allocator);
16391 
16392  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16393 
16394  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16395 
16396 #if VMA_RECORDING_ENABLED
16397  if(allocator->GetRecorder() != VMA_NULL)
16398  {
16399  allocator->GetRecorder()->RecordFreeMemoryPages(
16400  allocator->GetCurrentFrameIndex(),
16401  (uint64_t)allocationCount,
16402  pAllocations);
16403  }
16404 #endif
16405 
16406  allocator->FreeMemory(allocationCount, pAllocations);
16407 }
16408 
16409 VkResult vmaResizeAllocation(
16410  VmaAllocator allocator,
16411  VmaAllocation allocation,
16412  VkDeviceSize newSize)
16413 {
16414  VMA_ASSERT(allocator && allocation);
16415 
16416  VMA_DEBUG_LOG("vmaResizeAllocation");
16417 
16418  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16419 
16420 #if VMA_RECORDING_ENABLED
16421  if(allocator->GetRecorder() != VMA_NULL)
16422  {
16423  allocator->GetRecorder()->RecordResizeAllocation(
16424  allocator->GetCurrentFrameIndex(),
16425  allocation,
16426  newSize);
16427  }
16428 #endif
16429 
16430  return allocator->ResizeAllocation(allocation, newSize);
16431 }
16432 
16433 void vmaGetAllocationInfo(
16434  VmaAllocator allocator,
16435  VmaAllocation allocation,
16436  VmaAllocationInfo* pAllocationInfo)
16437 {
16438  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16439 
16440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16441 
16442 #if VMA_RECORDING_ENABLED
16443  if(allocator->GetRecorder() != VMA_NULL)
16444  {
16445  allocator->GetRecorder()->RecordGetAllocationInfo(
16446  allocator->GetCurrentFrameIndex(),
16447  allocation);
16448  }
16449 #endif
16450 
16451  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16452 }
16453 
16454 VkBool32 vmaTouchAllocation(
16455  VmaAllocator allocator,
16456  VmaAllocation allocation)
16457 {
16458  VMA_ASSERT(allocator && allocation);
16459 
16460  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16461 
16462 #if VMA_RECORDING_ENABLED
16463  if(allocator->GetRecorder() != VMA_NULL)
16464  {
16465  allocator->GetRecorder()->RecordTouchAllocation(
16466  allocator->GetCurrentFrameIndex(),
16467  allocation);
16468  }
16469 #endif
16470 
16471  return allocator->TouchAllocation(allocation);
16472 }
16473 
16474 void vmaSetAllocationUserData(
16475  VmaAllocator allocator,
16476  VmaAllocation allocation,
16477  void* pUserData)
16478 {
16479  VMA_ASSERT(allocator && allocation);
16480 
16481  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16482 
16483  allocation->SetUserData(allocator, pUserData);
16484 
16485 #if VMA_RECORDING_ENABLED
16486  if(allocator->GetRecorder() != VMA_NULL)
16487  {
16488  allocator->GetRecorder()->RecordSetAllocationUserData(
16489  allocator->GetCurrentFrameIndex(),
16490  allocation,
16491  pUserData);
16492  }
16493 #endif
16494 }
16495 
16496 void vmaCreateLostAllocation(
16497  VmaAllocator allocator,
16498  VmaAllocation* pAllocation)
16499 {
16500  VMA_ASSERT(allocator && pAllocation);
16501 
16502  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16503 
16504  allocator->CreateLostAllocation(pAllocation);
16505 
16506 #if VMA_RECORDING_ENABLED
16507  if(allocator->GetRecorder() != VMA_NULL)
16508  {
16509  allocator->GetRecorder()->RecordCreateLostAllocation(
16510  allocator->GetCurrentFrameIndex(),
16511  *pAllocation);
16512  }
16513 #endif
16514 }
16515 
16516 VkResult vmaMapMemory(
16517  VmaAllocator allocator,
16518  VmaAllocation allocation,
16519  void** ppData)
16520 {
16521  VMA_ASSERT(allocator && allocation && ppData);
16522 
16523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16524 
16525  VkResult res = allocator->Map(allocation, ppData);
16526 
16527 #if VMA_RECORDING_ENABLED
16528  if(allocator->GetRecorder() != VMA_NULL)
16529  {
16530  allocator->GetRecorder()->RecordMapMemory(
16531  allocator->GetCurrentFrameIndex(),
16532  allocation);
16533  }
16534 #endif
16535 
16536  return res;
16537 }
16538 
16539 void vmaUnmapMemory(
16540  VmaAllocator allocator,
16541  VmaAllocation allocation)
16542 {
16543  VMA_ASSERT(allocator && allocation);
16544 
16545  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16546 
16547 #if VMA_RECORDING_ENABLED
16548  if(allocator->GetRecorder() != VMA_NULL)
16549  {
16550  allocator->GetRecorder()->RecordUnmapMemory(
16551  allocator->GetCurrentFrameIndex(),
16552  allocation);
16553  }
16554 #endif
16555 
16556  allocator->Unmap(allocation);
16557 }
16558 
16559 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16560 {
16561  VMA_ASSERT(allocator && allocation);
16562 
16563  VMA_DEBUG_LOG("vmaFlushAllocation");
16564 
16565  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16566 
16567  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16568 
16569 #if VMA_RECORDING_ENABLED
16570  if(allocator->GetRecorder() != VMA_NULL)
16571  {
16572  allocator->GetRecorder()->RecordFlushAllocation(
16573  allocator->GetCurrentFrameIndex(),
16574  allocation, offset, size);
16575  }
16576 #endif
16577 }
16578 
16579 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16580 {
16581  VMA_ASSERT(allocator && allocation);
16582 
16583  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16584 
16585  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16586 
16587  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16588 
16589 #if VMA_RECORDING_ENABLED
16590  if(allocator->GetRecorder() != VMA_NULL)
16591  {
16592  allocator->GetRecorder()->RecordInvalidateAllocation(
16593  allocator->GetCurrentFrameIndex(),
16594  allocation, offset, size);
16595  }
16596 #endif
16597 }
16598 
16599 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16600 {
16601  VMA_ASSERT(allocator);
16602 
16603  VMA_DEBUG_LOG("vmaCheckCorruption");
16604 
16605  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16606 
16607  return allocator->CheckCorruption(memoryTypeBits);
16608 }
16609 
16610 VkResult vmaDefragment(
16611  VmaAllocator allocator,
16612  VmaAllocation* pAllocations,
16613  size_t allocationCount,
16614  VkBool32* pAllocationsChanged,
16615  const VmaDefragmentationInfo *pDefragmentationInfo,
16616  VmaDefragmentationStats* pDefragmentationStats)
16617 {
16618  // Deprecated interface, reimplemented using new one.
16619 
16620  VmaDefragmentationInfo2 info2 = {};
16621  info2.allocationCount = (uint32_t)allocationCount;
16622  info2.pAllocations = pAllocations;
16623  info2.pAllocationsChanged = pAllocationsChanged;
16624  if(pDefragmentationInfo != VMA_NULL)
16625  {
16626  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16627  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16628  }
16629  else
16630  {
16631  info2.maxCpuAllocationsToMove = UINT32_MAX;
16632  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16633  }
16634  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16635 
16636  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16637  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16638  if(res == VK_NOT_READY)
16639  {
16640  res = vmaDefragmentationEnd(allocator, ctx);
16641  }
16642  return res;
16643 }
16644 
16645 VkResult vmaDefragmentationBegin(
16646  VmaAllocator allocator,
16647  const VmaDefragmentationInfo2* pInfo,
16648  VmaDefragmentationStats* pStats,
16649  VmaDefragmentationContext *pContext)
16650 {
16651  VMA_ASSERT(allocator && pInfo && pContext);
16652 
16653  // Degenerate case: Nothing to defragment.
16654  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16655  {
16656  return VK_SUCCESS;
16657  }
16658 
16659  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16660  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16661  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16662  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16663 
16664  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16665 
16666  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16667 
16668  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16669 
16670 #if VMA_RECORDING_ENABLED
16671  if(allocator->GetRecorder() != VMA_NULL)
16672  {
16673  allocator->GetRecorder()->RecordDefragmentationBegin(
16674  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16675  }
16676 #endif
16677 
16678  return res;
16679 }
16680 
16681 VkResult vmaDefragmentationEnd(
16682  VmaAllocator allocator,
16683  VmaDefragmentationContext context)
16684 {
16685  VMA_ASSERT(allocator);
16686 
16687  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16688 
16689  if(context != VK_NULL_HANDLE)
16690  {
16691  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16692 
16693 #if VMA_RECORDING_ENABLED
16694  if(allocator->GetRecorder() != VMA_NULL)
16695  {
16696  allocator->GetRecorder()->RecordDefragmentationEnd(
16697  allocator->GetCurrentFrameIndex(), context);
16698  }
16699 #endif
16700 
16701  return allocator->DefragmentationEnd(context);
16702  }
16703  else
16704  {
16705  return VK_SUCCESS;
16706  }
16707 }
16708 
16709 VkResult vmaBindBufferMemory(
16710  VmaAllocator allocator,
16711  VmaAllocation allocation,
16712  VkBuffer buffer)
16713 {
16714  VMA_ASSERT(allocator && allocation && buffer);
16715 
16716  VMA_DEBUG_LOG("vmaBindBufferMemory");
16717 
16718  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16719 
16720  return allocator->BindBufferMemory(allocation, buffer);
16721 }
16722 
16723 VkResult vmaBindImageMemory(
16724  VmaAllocator allocator,
16725  VmaAllocation allocation,
16726  VkImage image)
16727 {
16728  VMA_ASSERT(allocator && allocation && image);
16729 
16730  VMA_DEBUG_LOG("vmaBindImageMemory");
16731 
16732  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16733 
16734  return allocator->BindImageMemory(allocation, image);
16735 }
16736 
16737 VkResult vmaCreateBuffer(
16738  VmaAllocator allocator,
16739  const VkBufferCreateInfo* pBufferCreateInfo,
16740  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16741  VkBuffer* pBuffer,
16742  VmaAllocation* pAllocation,
16743  VmaAllocationInfo* pAllocationInfo)
16744 {
16745  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16746 
16747  if(pBufferCreateInfo->size == 0)
16748  {
16749  return VK_ERROR_VALIDATION_FAILED_EXT;
16750  }
16751 
16752  VMA_DEBUG_LOG("vmaCreateBuffer");
16753 
16754  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16755 
16756  *pBuffer = VK_NULL_HANDLE;
16757  *pAllocation = VK_NULL_HANDLE;
16758 
16759  // 1. Create VkBuffer.
16760  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16761  allocator->m_hDevice,
16762  pBufferCreateInfo,
16763  allocator->GetAllocationCallbacks(),
16764  pBuffer);
16765  if(res >= 0)
16766  {
16767  // 2. vkGetBufferMemoryRequirements.
16768  VkMemoryRequirements vkMemReq = {};
16769  bool requiresDedicatedAllocation = false;
16770  bool prefersDedicatedAllocation = false;
16771  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16772  requiresDedicatedAllocation, prefersDedicatedAllocation);
16773 
16774  // Make sure the alignment requirements for specific buffer usages, as reported
16775  // in Physical Device Properties, are included in the alignment reported by the memory requirements.
16776  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16777  {
16778  VMA_ASSERT(vkMemReq.alignment %
16779  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16780  }
16781  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16782  {
16783  VMA_ASSERT(vkMemReq.alignment %
16784  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16785  }
16786  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16787  {
16788  VMA_ASSERT(vkMemReq.alignment %
16789  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16790  }
16791 
16792  // 3. Allocate memory using allocator.
16793  res = allocator->AllocateMemory(
16794  vkMemReq,
16795  requiresDedicatedAllocation,
16796  prefersDedicatedAllocation,
16797  *pBuffer, // dedicatedBuffer
16798  VK_NULL_HANDLE, // dedicatedImage
16799  *pAllocationCreateInfo,
16800  VMA_SUBALLOCATION_TYPE_BUFFER,
16801  1, // allocationCount
16802  pAllocation);
16803 
16804 #if VMA_RECORDING_ENABLED
16805  if(allocator->GetRecorder() != VMA_NULL)
16806  {
16807  allocator->GetRecorder()->RecordCreateBuffer(
16808  allocator->GetCurrentFrameIndex(),
16809  *pBufferCreateInfo,
16810  *pAllocationCreateInfo,
16811  *pAllocation);
16812  }
16813 #endif
16814 
16815  if(res >= 0)
16816  {
16817  // 4. Bind buffer with memory.
16818  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16819  {
16820  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16821  }
16822  if(res >= 0)
16823  {
16824  // All steps succeeded.
16825  #if VMA_STATS_STRING_ENABLED
16826  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16827  #endif
16828  if(pAllocationInfo != VMA_NULL)
16829  {
16830  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16831  }
16832 
16833  return VK_SUCCESS;
16834  }
 // Binding failed: free the allocation before destroying the buffer and returning the error.
16835  allocator->FreeMemory(
16836  1, // allocationCount
16837  pAllocation);
16838  *pAllocation = VK_NULL_HANDLE;
16839  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16840  *pBuffer = VK_NULL_HANDLE;
16841  return res;
16842  }
 // Memory allocation failed: destroy the buffer and return the error code.
16843  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16844  *pBuffer = VK_NULL_HANDLE;
16845  return res;
16846  }
16847  return res;
16848 }
16849 
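/*
Example (a minimal usage sketch, not part of the library): the common one-call path.
vmaCreateBuffer() performs all the steps above (create, allocate, bind), and
vmaDestroyBuffer() undoes them. `allocator` is a hypothetical valid VmaAllocator.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr) == VK_SUCCESS)
    {
        // ... use buf ...
        vmaDestroyBuffer(allocator, buf, alloc); // destroys the buffer and frees its memory
    }
*/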
16850 void vmaDestroyBuffer(
16851  VmaAllocator allocator,
16852  VkBuffer buffer,
16853  VmaAllocation allocation)
16854 {
16855  VMA_ASSERT(allocator);
16856 
16857  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16858  {
16859  return;
16860  }
16861 
16862  VMA_DEBUG_LOG("vmaDestroyBuffer");
16863 
16864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16865 
16866 #if VMA_RECORDING_ENABLED
16867  if(allocator->GetRecorder() != VMA_NULL)
16868  {
16869  allocator->GetRecorder()->RecordDestroyBuffer(
16870  allocator->GetCurrentFrameIndex(),
16871  allocation);
16872  }
16873 #endif
16874 
16875  if(buffer != VK_NULL_HANDLE)
16876  {
16877  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16878  }
16879 
16880  if(allocation != VK_NULL_HANDLE)
16881  {
16882  allocator->FreeMemory(
16883  1, // allocationCount
16884  &allocation);
16885  }
16886 }
16887 
16888 VkResult vmaCreateImage(
16889  VmaAllocator allocator,
16890  const VkImageCreateInfo* pImageCreateInfo,
16891  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16892  VkImage* pImage,
16893  VmaAllocation* pAllocation,
16894  VmaAllocationInfo* pAllocationInfo)
16895 {
16896  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16897 
16898  if(pImageCreateInfo->extent.width == 0 ||
16899  pImageCreateInfo->extent.height == 0 ||
16900  pImageCreateInfo->extent.depth == 0 ||
16901  pImageCreateInfo->mipLevels == 0 ||
16902  pImageCreateInfo->arrayLayers == 0)
16903  {
16904  return VK_ERROR_VALIDATION_FAILED_EXT;
16905  }
16906 
16907  VMA_DEBUG_LOG("vmaCreateImage");
16908 
16909  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16910 
16911  *pImage = VK_NULL_HANDLE;
16912  *pAllocation = VK_NULL_HANDLE;
16913 
16914  // 1. Create VkImage.
16915  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16916  allocator->m_hDevice,
16917  pImageCreateInfo,
16918  allocator->GetAllocationCallbacks(),
16919  pImage);
16920  if(res >= 0)
16921  {
16922  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16923  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16924  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16925 
16926  // 2. Allocate memory using allocator.
16927  VkMemoryRequirements vkMemReq = {};
16928  bool requiresDedicatedAllocation = false;
16929  bool prefersDedicatedAllocation = false;
16930  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16931  requiresDedicatedAllocation, prefersDedicatedAllocation);
16932 
16933  res = allocator->AllocateMemory(
16934  vkMemReq,
16935  requiresDedicatedAllocation,
16936  prefersDedicatedAllocation,
16937  VK_NULL_HANDLE, // dedicatedBuffer
16938  *pImage, // dedicatedImage
16939  *pAllocationCreateInfo,
16940  suballocType,
16941  1, // allocationCount
16942  pAllocation);
16943 
16944 #if VMA_RECORDING_ENABLED
16945  if(allocator->GetRecorder() != VMA_NULL)
16946  {
16947  allocator->GetRecorder()->RecordCreateImage(
16948  allocator->GetCurrentFrameIndex(),
16949  *pImageCreateInfo,
16950  *pAllocationCreateInfo,
16951  *pAllocation);
16952  }
16953 #endif
16954 
16955  if(res >= 0)
16956  {
16957  // 3. Bind image with memory.
16958  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16959  {
16960  res = allocator->BindImageMemory(*pAllocation, *pImage);
16961  }
16962  if(res >= 0)
16963  {
16964  // All steps succeeded.
16965  #if VMA_STATS_STRING_ENABLED
16966  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16967  #endif
16968  if(pAllocationInfo != VMA_NULL)
16969  {
16970  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16971  }
16972 
16973  return VK_SUCCESS;
16974  }
 // Binding failed: free the allocation before destroying the image and returning the error.
16975  allocator->FreeMemory(
16976  1, // allocationCount
16977  pAllocation);
16978  *pAllocation = VK_NULL_HANDLE;
16979  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16980  *pImage = VK_NULL_HANDLE;
16981  return res;
16982  }
 // Memory allocation failed: destroy the image and return the error code.
16983  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16984  *pImage = VK_NULL_HANDLE;
16985  return res;
16986  }
16987  return res;
16988 }
16989 
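/*
Example (a minimal usage sketch, not part of the library): creating a sampled 2D image
with vmaCreateImage() and releasing it with vmaDestroyImage(). `allocator` is a
hypothetical valid VmaAllocator. Note that extent, mipLevels and arrayLayers must all
be nonzero to pass the validation above.

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage img;
    VmaAllocation alloc;
    if(vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr) == VK_SUCCESS)
    {
        // ... use img ...
        vmaDestroyImage(allocator, img, alloc); // destroys the image and frees its memory
    }
*/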
16990 void vmaDestroyImage(
16991  VmaAllocator allocator,
16992  VkImage image,
16993  VmaAllocation allocation)
16994 {
16995  VMA_ASSERT(allocator);
16996 
16997  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16998  {
16999  return;
17000  }
17001 
17002  VMA_DEBUG_LOG("vmaDestroyImage");
17003 
17004  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17005 
17006 #if VMA_RECORDING_ENABLED
17007  if(allocator->GetRecorder() != VMA_NULL)
17008  {
17009  allocator->GetRecorder()->RecordDestroyImage(
17010  allocator->GetCurrentFrameIndex(),
17011  allocation);
17012  }
17013 #endif
17014 
17015  if(image != VK_NULL_HANDLE)
17016  {
17017  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17018  }
17019  if(allocation != VK_NULL_HANDLE)
17020  {
17021  allocator->FreeMemory(
17022  1, // allocationCount
17023  &allocation);
17024  }
17025 }
17026 
17027 #endif // #ifdef VMA_IMPLEMENTATION