Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

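/*
Example (a minimal sketch, not part of the original header): a pair of logging
callbacks matching the PFN_vma*DeviceMemoryFunction signatures above. `MyLogFn`
is a hypothetical application-side logging function.

    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        MyLogFn("Allocated %llu bytes from memory type %u", (unsigned long long)size, memoryType);
    }
    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        MyLogFn("Freed %llu bytes of memory type %u", (unsigned long long)size, memoryType);
    }
*/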
/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

/// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    /// Enables flush after recording every function call, so that the file is not corrupted in case of a crash.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    /// Flags for recording. Use #VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* pFilePath;
} VmaRecordSettings;

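/*
Example (sketch): requesting recording of VMA calls to a file, assuming
VMA_RECORDING_ENABLED is 1 on the target platform. The path is illustrative.

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
    recordSettings.pFilePath = "vma_recording.csv";
    // Then point VmaAllocatorCreateInfo::pRecordSettings at it (see below).
*/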
/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. Must be valid for the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. Must be valid for the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single `VkDeviceMemory` block allocated from large heaps. Optional, 0 = default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for `vkAllocateMemory`/`vkFreeMemory`. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or an array of per-heap limits on the number of bytes that can be allocated.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is 1.
    const VmaVulkanFunctions* pVulkanFunctions;
    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

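/*
Example (a minimal sketch): creating and destroying the allocator. Assumes
`physicalDevice` and `device` were already created by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/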
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of #VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from current state of Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED
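
/*
Example (sketch): dumping allocator statistics as a JSON string, assuming
VMA_STATS_STRING_ENABLED is 1. The string must be released with
vmaFreeStatsString(), not with free().

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    // ... write statsString to a log or file ...
    vmaFreeStatsString(allocator, statsString);
*/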

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so fast access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on host. Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on host and preferably cached, for resources written by device and read by host.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use #VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements another way.
    VmaMemoryUsage usage;
    /// Flags that must be set in a Memory Type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional, can be null.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in #VmaAllocation.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

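/*
Example (sketch): querying the memory type index that would be used for a
CPU-visible staging allocation. `memoryTypeBits` would normally come from
vkGetBufferMemoryRequirements().

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/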
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    /// Enables alternative, linear allocation algorithm in this pool.
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    /// Enables alternative, buddy allocation algorithm in this pool.
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a #VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of #VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional, 0 = default.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional, 0 = unlimited.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing #VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any #VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any #VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region available for a new allocation.
    VkDeviceSize unusedRangeSizeMax;
    /// Number of `VkDeviceMemory` blocks allocated for this pool.
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

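/*
Example (sketch): creating a custom pool for small uniform buffers, using
vmaFindMemoryTypeIndexForBufferInfo() declared above. Sizes are illustrative.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    VmaPoolCreateInfo poolCreateInfo = {};
    vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &poolCreateInfo.memoryTypeIndex);
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/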
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data; null if not mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

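/*
Example (sketch): writing to an allocation created in a HOST_VISIBLE memory
type. If the memory type is not HOST_COHERENT, follow the write with
vmaFlushAllocation() (declared below). `srcData`/`srcDataSize` are app-side.

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/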
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

/// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

/// Parameters for defragmentation. To be used with function vmaDefragmentationBegin().
typedef struct VmaDefragmentationInfo2 {
    /// Reserved for future use. Should be 0.
    VmaDefragmentationFlags flags;
    /// Number of allocations in pAllocations array.
    uint32_t allocationCount;
    /// Pointer to array of allocations that can be defragmented.
    VmaAllocation* pAllocations;
    /// Optional, output. Pointer to array that will be filled with information whether the allocation at the same index has been changed during defragmentation.
    VkBool32* pAllocationsChanged;
    /// Number of pools in pPools array.
    uint32_t poolCount;
    /// Either null or pointer to array of pools to be defragmented.
    VmaPool* pPools;
    /// Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side.
    VkDeviceSize maxCpuBytesToMove;
    /// Maximum number of allocations that can be moved to a different place using transfers on CPU side.
    uint32_t maxCpuAllocationsToMove;
    /// Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to commandBuffer.
    VkDeviceSize maxGpuBytesToMove;
    /// Maximum number of allocations that can be moved to a different place using transfers on GPU side.
    uint32_t maxGpuAllocationsToMove;
    /// Optional. Command buffer where GPU copy commands will be posted.
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

/// Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty `VkDeviceMemory` objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext* pContext);

VkResult vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

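/*
Example (sketch): CPU-side defragmentation of a set of allocations. Assumes
`allocations`/`allocationCount` were collected by the application. Buffers or
images bound to moved allocations must be re-created and re-bound afterwards.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/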
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

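/*
Example (sketch): creating a GPU-only vertex buffer together with its memory
in one call. Sizes and usage flags are illustrative.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/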
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

/// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif


// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>
#include <atomic> // for std::atomic

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    /**
    Every allocation will have its own memory block.
    Define to 1 for debugging purposes only.
    */
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    /**
    Minimum alignment of all allocations, in bytes.
    Set to more than 1 for debugging purposes only. Must be a power of two.
    */
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /**
    Minimum margin before and after every allocation, in bytes.
    Set nonzero for debugging purposes only.
    */
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    /**
    Define this macro to 1 to automatically fill newly created and destroyed
    allocations with some bit pattern.
    */
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    /**
    Define this macro to 1, together with a nonzero VMA_DEBUG_MARGIN, to enable
    writing a magic value to the margin before and after every allocation and
    validating it, so that memory corruptions (out-of-bounds writes) are detected.
    */
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /**
    Set this to 1 for debugging purposes only, to enable a single mutex protecting
    all entry calls to the library. Can be useful for debugging multithreading issues.
    */
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /**
    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    Set to more than 1 for debugging purposes only. Must be a power of two.
    */
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be an unsigned integer, or a signed integer whose value is nonnegative.
Returns true for 0.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

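// Example (sketch): typical scoped use of the RAII lock helpers above; the
// member names are illustrative, not from this file.
//
//     {
//         VmaMutexLockRead lock(m_Mutex, m_UseMutex); // shared lock taken here
//         // ... read shared state ...
//     } // unlocked automatically at end of scope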
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

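// Example (sketch): VmaStlAllocator plugs into STL-style containers, e.g. the
// VmaVector defined below. `pCallbacks` is an application-provided
// VkAllocationCallbacks pointer (may be null to fall back to system malloc).
//
//     VmaVector< int, VmaStlAllocator<int> > v(VmaStlAllocator<int>(pCallbacks));
//     v.push_back(42);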
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

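// Example (sketch): allocating and freeing items from the pool allocator.
// Note that Alloc() hands out raw, unconstructed storage for T. `MyItem` and
// `pCallbacks` are illustrative application-side names.
//
//     VmaPoolAllocator<MyItem> itemAllocator(pCallbacks, 128); // first block holds 128 items
//     MyItem* item = itemAllocator.Alloc();
//     // ... initialize and use *item ...
//     itemAllocator.Free(item);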
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}


////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would spend unnecessary
    // computation returning all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

4799  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4800 
4801  bool empty() const { return m_RawList.IsEmpty(); }
4802  size_t size() const { return m_RawList.GetCount(); }
4803 
4804  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4805  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4806 
4807  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4808  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4809 
4810  void clear() { m_RawList.Clear(); }
4811  void push_back(const T& value) { m_RawList.PushBack(value); }
4812  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4813  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4814 
4815 private:
4816  VmaRawList<T> m_RawList;
4817 };
4818 
4819 #endif // #if VMA_USE_STL_LIST
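/*
Illustrative usage sketch (not part of the library): VmaList wraps VmaRawList
behind the subset of the std::list interface used internally. end() is
represented by a null item pointer, which is why iterator::operator--()
applied to end() re-seeds the iterator from the list's back item.

    // Assumes `pCallbacks` is a valid const VkAllocationCallbacks* (may be null).
    VmaStlAllocator<int> stlAllocator(pCallbacks);
    VmaList<int, VmaStlAllocator<int> > list(stlAllocator);
    list.push_back(1);
    list.push_back(3);
    VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin();
    ++it;               // points at 3
    list.insert(it, 2); // inserts before it; list: 1, 2, 3
*/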
4820 
4821 ////////////////////////////////////////////////////////////////////////////////
4822 // class VmaMap
4823 
4824 // Unused in this version.
4825 #if 0
4826 
4827 #if VMA_USE_STL_UNORDERED_MAP
4828 
4829 #define VmaPair std::pair
4830 
4831 #define VMA_MAP_TYPE(KeyT, ValueT) \
4832  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4833 
4834 #else // #if VMA_USE_STL_UNORDERED_MAP
4835 
4836 template<typename T1, typename T2>
4837 struct VmaPair
4838 {
4839  T1 first;
4840  T2 second;
4841 
4842  VmaPair() : first(), second() { }
4843  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4844 };
4845 
4846 /* Class compatible with a subset of the interface of std::unordered_map.
4847 KeyT, ValueT must be POD because they will be stored in VmaVector.
4848 */
4849 template<typename KeyT, typename ValueT>
4850 class VmaMap
4851 {
4852 public:
4853  typedef VmaPair<KeyT, ValueT> PairType;
4854  typedef PairType* iterator;
4855 
4856  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4857 
4858  iterator begin() { return m_Vector.begin(); }
4859  iterator end() { return m_Vector.end(); }
4860 
4861  void insert(const PairType& pair);
4862  iterator find(const KeyT& key);
4863  void erase(iterator it);
4864 
4865 private:
4866  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4867 };
4868 
4869 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4870 
4871 template<typename FirstT, typename SecondT>
4872 struct VmaPairFirstLess
4873 {
4874  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4875  {
4876  return lhs.first < rhs.first;
4877  }
4878  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4879  {
4880  return lhs.first < rhsFirst;
4881  }
4882 };
4883 
4884 template<typename KeyT, typename ValueT>
4885 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4886 {
4887  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4888  m_Vector.data(),
4889  m_Vector.data() + m_Vector.size(),
4890  pair,
4891  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4892  VmaVectorInsert(m_Vector, indexToInsert, pair);
4893 }
4894 
4895 template<typename KeyT, typename ValueT>
4896 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4897 {
4898  PairType* it = VmaBinaryFindFirstNotLess(
4899  m_Vector.data(),
4900  m_Vector.data() + m_Vector.size(),
4901  key,
4902  VmaPairFirstLess<KeyT, ValueT>());
4903  if((it != m_Vector.end()) && (it->first == key))
4904  {
4905  return it;
4906  }
4907  else
4908  {
4909  return m_Vector.end();
4910  }
4911 }
4912 
4913 template<typename KeyT, typename ValueT>
4914 void VmaMap<KeyT, ValueT>::erase(iterator it)
4915 {
4916  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4917 }
4918 
4919 #endif // #if VMA_USE_STL_UNORDERED_MAP
4920 
4921 #endif // #if 0
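/*
Illustrative sketch (not part of the library): although compiled out, the
VmaMap above implements a "flat map" - a vector kept sorted by key, with
binary search for insertion and lookup. The same technique with standard
types:

    #include <algorithm>
    #include <utility>
    #include <vector>

    std::vector< std::pair<int, const char*> > flatMap;
    const std::pair<int, const char*> entry(42, "forty-two");
    // Insert at the position found by binary search, keeping the vector sorted.
    flatMap.insert(
        std::lower_bound(flatMap.begin(), flatMap.end(), entry),
        entry);
    // Lookup is then O(log n), versus O(1) amortized for std::unordered_map,
    // but with no per-node allocations and better cache behavior.
*/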
4922 
4923 ////////////////////////////////////////////////////////////////////////////////
4924 
4925 class VmaDeviceMemoryBlock;
4926 
4927 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4928 
4929 struct VmaAllocation_T
4930 {
4931 private:
4932  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4933 
4934  enum FLAGS
4935  {
4936  FLAG_USER_DATA_STRING = 0x01,
4937  };
4938 
4939 public:
4940  enum ALLOCATION_TYPE
4941  {
4942  ALLOCATION_TYPE_NONE,
4943  ALLOCATION_TYPE_BLOCK,
4944  ALLOCATION_TYPE_DEDICATED,
4945  };
4946 
4947  /*
4948  This struct cannot have a constructor or destructor. It must be POD because it is
4949  allocated using VmaPoolAllocator.
4950  */
4951 
4952  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4953  {
4954  m_Alignment = 1;
4955  m_Size = 0;
4956  m_pUserData = VMA_NULL;
4957  m_LastUseFrameIndex = currentFrameIndex;
4958  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4959  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4960  m_MapCount = 0;
4961  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4962 
4963 #if VMA_STATS_STRING_ENABLED
4964  m_CreationFrameIndex = currentFrameIndex;
4965  m_BufferImageUsage = 0;
4966 #endif
4967  }
4968 
4969  void Dtor()
4970  {
4971  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4972 
4973  // Check if owned string was freed.
4974  VMA_ASSERT(m_pUserData == VMA_NULL);
4975  }
4976 
4977  void InitBlockAllocation(
4978  VmaDeviceMemoryBlock* block,
4979  VkDeviceSize offset,
4980  VkDeviceSize alignment,
4981  VkDeviceSize size,
4982  VmaSuballocationType suballocationType,
4983  bool mapped,
4984  bool canBecomeLost)
4985  {
4986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4987  VMA_ASSERT(block != VMA_NULL);
4988  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4989  m_Alignment = alignment;
4990  m_Size = size;
4991  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4992  m_SuballocationType = (uint8_t)suballocationType;
4993  m_BlockAllocation.m_Block = block;
4994  m_BlockAllocation.m_Offset = offset;
4995  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4996  }
4997 
4998  void InitLost()
4999  {
5000  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5001  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5002  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5003  m_BlockAllocation.m_Block = VMA_NULL;
5004  m_BlockAllocation.m_Offset = 0;
5005  m_BlockAllocation.m_CanBecomeLost = true;
5006  }
5007 
5008  void ChangeBlockAllocation(
5009  VmaAllocator hAllocator,
5010  VmaDeviceMemoryBlock* block,
5011  VkDeviceSize offset);
5012 
5013  void ChangeSize(VkDeviceSize newSize);
5014  void ChangeOffset(VkDeviceSize newOffset);
5015 
5016  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5017  void InitDedicatedAllocation(
5018  uint32_t memoryTypeIndex,
5019  VkDeviceMemory hMemory,
5020  VmaSuballocationType suballocationType,
5021  void* pMappedData,
5022  VkDeviceSize size)
5023  {
5024  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5025  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5026  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5027  m_Alignment = 0;
5028  m_Size = size;
5029  m_SuballocationType = (uint8_t)suballocationType;
5030  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5031  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5032  m_DedicatedAllocation.m_hMemory = hMemory;
5033  m_DedicatedAllocation.m_pMappedData = pMappedData;
5034  }
5035 
5036  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5037  VkDeviceSize GetAlignment() const { return m_Alignment; }
5038  VkDeviceSize GetSize() const { return m_Size; }
5039  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5040  void* GetUserData() const { return m_pUserData; }
5041  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5042  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5043 
5044  VmaDeviceMemoryBlock* GetBlock() const
5045  {
5046  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5047  return m_BlockAllocation.m_Block;
5048  }
5049  VkDeviceSize GetOffset() const;
5050  VkDeviceMemory GetMemory() const;
5051  uint32_t GetMemoryTypeIndex() const;
5052  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5053  void* GetMappedData() const;
5054  bool CanBecomeLost() const;
5055 
5056  uint32_t GetLastUseFrameIndex() const
5057  {
5058  return m_LastUseFrameIndex.load();
5059  }
5060  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5061  {
5062  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5063  }
5064  /*
5065  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5066  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5067  - Else, returns false.
5068 
5069  If hAllocation is already lost, assert - you should not call it then.
5070  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5071  */
5072  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5073 
5074  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5075  {
5076  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5077  outInfo.blockCount = 1;
5078  outInfo.allocationCount = 1;
5079  outInfo.unusedRangeCount = 0;
5080  outInfo.usedBytes = m_Size;
5081  outInfo.unusedBytes = 0;
5082  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5083  outInfo.unusedRangeSizeMin = UINT64_MAX;
5084  outInfo.unusedRangeSizeMax = 0;
5085  }
5086 
5087  void BlockAllocMap();
5088  void BlockAllocUnmap();
5089  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5090  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5091 
5092 #if VMA_STATS_STRING_ENABLED
5093  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5094  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5095 
5096  void InitBufferImageUsage(uint32_t bufferImageUsage)
5097  {
5098  VMA_ASSERT(m_BufferImageUsage == 0);
5099  m_BufferImageUsage = bufferImageUsage;
5100  }
5101 
5102  void PrintParameters(class VmaJsonWriter& json) const;
5103 #endif
5104 
5105 private:
5106  VkDeviceSize m_Alignment;
5107  VkDeviceSize m_Size;
5108  void* m_pUserData;
5109  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5110  uint8_t m_Type; // ALLOCATION_TYPE
5111  uint8_t m_SuballocationType; // VmaSuballocationType
5112  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5113  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5114  uint8_t m_MapCount;
5115  uint8_t m_Flags; // enum FLAGS
5116 
5117  // Allocation out of VmaDeviceMemoryBlock.
5118  struct BlockAllocation
5119  {
5120  VmaDeviceMemoryBlock* m_Block;
5121  VkDeviceSize m_Offset;
5122  bool m_CanBecomeLost;
5123  };
5124 
5125  // Allocation for an object that has its own private VkDeviceMemory.
5126  struct DedicatedAllocation
5127  {
5128  uint32_t m_MemoryTypeIndex;
5129  VkDeviceMemory m_hMemory;
5130  void* m_pMappedData; // Not null means memory is mapped.
5131  };
5132 
5133  union
5134  {
5135  // Allocation out of VmaDeviceMemoryBlock.
5136  BlockAllocation m_BlockAllocation;
5137  // Allocation for an object that has its own private VkDeviceMemory.
5138  DedicatedAllocation m_DedicatedAllocation;
5139  };
5140 
5141 #if VMA_STATS_STRING_ENABLED
5142  uint32_t m_CreationFrameIndex;
5143  uint32_t m_BufferImageUsage; // 0 if unknown.
5144 #endif
5145 
5146  void FreeUserDataString(VmaAllocator hAllocator);
5147 };
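/*
Illustrative lifecycle sketch (not part of the library): because
VmaAllocation_T stays POD, construction and destruction are driven manually.
A hypothetical creation path inside the allocator looks roughly like:

    // Assumes `objAlloc` is a VmaPoolAllocator<VmaAllocation_T>.
    VmaAllocation alloc = objAlloc.Alloc();
    alloc->Ctor(currentFrameIndex, false); // userDataString = false
    alloc->InitBlockAllocation(pBlock, offset, alignment, size,
        suballocType, false, false); // mapped = false, canBecomeLost = false
    // ... allocation in use ...
    alloc->SetUserData(hAllocator, VMA_NULL); // releases owned string, if any
    alloc->Dtor();
    objAlloc.Free(alloc);

Note how m_MapCount packs two things into one byte: bit 0x80 marks a
persistently mapped allocation and the low 7 bits count outstanding
vmaMapMemory() calls, hence the (m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP)
check in Dtor().
*/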
5148 
5149 /*
5150 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
5151 allocation and returned as an allocated memory block, or free.
5152 */
5153 struct VmaSuballocation
5154 {
5155  VkDeviceSize offset;
5156  VkDeviceSize size;
5157  VmaAllocation hAllocation;
5158  VmaSuballocationType type;
5159 };
5160 
5161 // Comparators for sorting suballocations by offset.
5162 struct VmaSuballocationOffsetLess
5163 {
5164  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5165  {
5166  return lhs.offset < rhs.offset;
5167  }
5168 };
5169 struct VmaSuballocationOffsetGreater
5170 {
5171  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5172  {
5173  return lhs.offset > rhs.offset;
5174  }
5175 };
5176 
5177 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5178 
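/*
Illustrative sketch (not part of the library): the comparators above allow
binary search over a suballocation sequence sorted by offset, e.g. via
VmaBinaryFindFirstNotLess (or std::lower_bound):

    // Assumes `suballocations` is a vector sorted by offset, ascending,
    // and `wantedOffset` is a hypothetical offset to look up.
    VmaSuballocation refSuballoc = {};
    refSuballoc.offset = wantedOffset;
    const VmaSuballocation* found = VmaBinaryFindFirstNotLess(
        suballocations.data(),
        suballocations.data() + suballocations.size(),
        refSuballoc,
        VmaSuballocationOffsetLess());
*/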
5179 // Cost of one additional allocation lost, expressed as an equivalent size in bytes.
5180 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5181 
5182 enum class VmaAllocationRequestType
5183 {
5184  Normal,
5185  // Used by "Linear" algorithm.
5186  UpperAddress,
5187  EndOf1st,
5188  EndOf2nd,
5189 };
5190 
5191 /*
5192 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5193 
5194 If canMakeOtherLost was false:
5195 - item points to a FREE suballocation.
5196 - itemsToMakeLostCount is 0.
5197 
5198 If canMakeOtherLost was true:
5199 - item points to the first of a sequence of suballocations, each of which is either
5200  FREE or points to a VmaAllocation that can become lost.
5201 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5202  the requested allocation to succeed.
5203 */
5204 struct VmaAllocationRequest
5205 {
5206  VkDeviceSize offset;
5207  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5208  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5209  VmaSuballocationList::iterator item;
5210  size_t itemsToMakeLostCount;
5211  void* customData;
5212  VmaAllocationRequestType type;
5213 
5214  VkDeviceSize CalcCost() const
5215  {
5216  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5217  }
5218 };
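/*
Illustrative example (not part of the library): CalcCost() converts "number of
allocations sacrificed" into bytes so different candidate placements can be
compared on one scale. With VMA_LOST_ALLOCATION_COST = 1048576, a request that
would make 3 allocations lost totaling 262144 bytes costs
262144 + 3 * 1048576 = 3407872, so a placement in an entirely free region
(cost 0) always wins when one exists.
*/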
5219 
5220 /*
5221 Data structure used for bookkeeping of allocations and unused ranges of memory
5222 in a single VkDeviceMemory block.
5223 */
5224 class VmaBlockMetadata
5225 {
5226 public:
5227  VmaBlockMetadata(VmaAllocator hAllocator);
5228  virtual ~VmaBlockMetadata() { }
5229  virtual void Init(VkDeviceSize size) { m_Size = size; }
5230 
5231  // Validates all data structures inside this object. If not valid, returns false.
5232  virtual bool Validate() const = 0;
5233  VkDeviceSize GetSize() const { return m_Size; }
5234  virtual size_t GetAllocationCount() const = 0;
5235  virtual VkDeviceSize GetSumFreeSize() const = 0;
5236  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5237  // Returns true if this block is empty - contains only a single free suballocation.
5238  virtual bool IsEmpty() const = 0;
5239 
5240  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5241  // Shouldn't modify blockCount.
5242  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5243 
5244 #if VMA_STATS_STRING_ENABLED
5245  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5246 #endif
5247 
5248  // Tries to find a place for suballocation with given parameters inside this block.
5249  // If succeeded, fills pAllocationRequest and returns true.
5250  // If failed, returns false.
5251  virtual bool CreateAllocationRequest(
5252  uint32_t currentFrameIndex,
5253  uint32_t frameInUseCount,
5254  VkDeviceSize bufferImageGranularity,
5255  VkDeviceSize allocSize,
5256  VkDeviceSize allocAlignment,
5257  bool upperAddress,
5258  VmaSuballocationType allocType,
5259  bool canMakeOtherLost,
5260  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5261  uint32_t strategy,
5262  VmaAllocationRequest* pAllocationRequest) = 0;
5263 
5264  virtual bool MakeRequestedAllocationsLost(
5265  uint32_t currentFrameIndex,
5266  uint32_t frameInUseCount,
5267  VmaAllocationRequest* pAllocationRequest) = 0;
5268 
5269  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5270 
5271  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5272 
5273  // Makes actual allocation based on request. Request must already be checked and valid.
5274  virtual void Alloc(
5275  const VmaAllocationRequest& request,
5276  VmaSuballocationType type,
5277  VkDeviceSize allocSize,
5278  VmaAllocation hAllocation) = 0;
5279 
5280  // Frees suballocation assigned to given memory region.
5281  virtual void Free(const VmaAllocation allocation) = 0;
5282  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5283 
5284  // Tries to resize (grow or shrink) space for given allocation, in place.
5285  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5286 
5287 protected:
5288  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5289 
5290 #if VMA_STATS_STRING_ENABLED
5291  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5292  VkDeviceSize unusedBytes,
5293  size_t allocationCount,
5294  size_t unusedRangeCount) const;
5295  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5296  VkDeviceSize offset,
5297  VmaAllocation hAllocation) const;
5298  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5299  VkDeviceSize offset,
5300  VkDeviceSize size) const;
5301  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5302 #endif
5303 
5304 private:
5305  VkDeviceSize m_Size;
5306  const VkAllocationCallbacks* m_pAllocationCallbacks;
5307 };
5308 
5309 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5310  VMA_ASSERT(0 && "Validation failed: " #cond); \
5311  return false; \
5312  } } while(false)
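/*
Illustrative usage sketch (not part of the library): VMA_VALIDATE is intended
for the Validate() implementations below - it asserts in debug configurations
and turns a failed condition into `return false`:

    bool ValidateSizesExample(VkDeviceSize sumFreeSize, VkDeviceSize blockSize)
    {
        VMA_VALIDATE(sumFreeSize <= blockSize);
        return true;
    }

The do { } while(false) wrapper makes the macro expand to a single statement,
so it composes safely with brace-less if/else.
*/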
5313 
5314 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5315 {
5316  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5317 public:
5318  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5319  virtual ~VmaBlockMetadata_Generic();
5320  virtual void Init(VkDeviceSize size);
5321 
5322  virtual bool Validate() const;
5323  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5324  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5325  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5326  virtual bool IsEmpty() const;
5327 
5328  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5329  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5330 
5331 #if VMA_STATS_STRING_ENABLED
5332  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5333 #endif
5334 
5335  virtual bool CreateAllocationRequest(
5336  uint32_t currentFrameIndex,
5337  uint32_t frameInUseCount,
5338  VkDeviceSize bufferImageGranularity,
5339  VkDeviceSize allocSize,
5340  VkDeviceSize allocAlignment,
5341  bool upperAddress,
5342  VmaSuballocationType allocType,
5343  bool canMakeOtherLost,
5344  uint32_t strategy,
5345  VmaAllocationRequest* pAllocationRequest);
5346 
5347  virtual bool MakeRequestedAllocationsLost(
5348  uint32_t currentFrameIndex,
5349  uint32_t frameInUseCount,
5350  VmaAllocationRequest* pAllocationRequest);
5351 
5352  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5353 
5354  virtual VkResult CheckCorruption(const void* pBlockData);
5355 
5356  virtual void Alloc(
5357  const VmaAllocationRequest& request,
5358  VmaSuballocationType type,
5359  VkDeviceSize allocSize,
5360  VmaAllocation hAllocation);
5361 
5362  virtual void Free(const VmaAllocation allocation);
5363  virtual void FreeAtOffset(VkDeviceSize offset);
5364 
5365  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5366 
5367  ////////////////////////////////////////////////////////////////////////////////
5368  // For defragmentation
5369 
5370  bool IsBufferImageGranularityConflictPossible(
5371  VkDeviceSize bufferImageGranularity,
5372  VmaSuballocationType& inOutPrevSuballocType) const;
5373 
5374 private:
5375  friend class VmaDefragmentationAlgorithm_Generic;
5376  friend class VmaDefragmentationAlgorithm_Fast;
5377 
5378  uint32_t m_FreeCount;
5379  VkDeviceSize m_SumFreeSize;
5380  VmaSuballocationList m_Suballocations;
5381  // Suballocations that are free and have size greater than a certain threshold.
5382  // Sorted by size, ascending.
5383  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5384 
5385  bool ValidateFreeSuballocationList() const;
5386 
5387  // Checks whether the requested suballocation with the given parameters can be placed in the given suballocItem.
5388  // If yes, fills pOffset and returns true. If no, returns false.
5389  bool CheckAllocation(
5390  uint32_t currentFrameIndex,
5391  uint32_t frameInUseCount,
5392  VkDeviceSize bufferImageGranularity,
5393  VkDeviceSize allocSize,
5394  VkDeviceSize allocAlignment,
5395  VmaSuballocationType allocType,
5396  VmaSuballocationList::const_iterator suballocItem,
5397  bool canMakeOtherLost,
5398  VkDeviceSize* pOffset,
5399  size_t* itemsToMakeLostCount,
5400  VkDeviceSize* pSumFreeSize,
5401  VkDeviceSize* pSumItemSize) const;
5402  // Given a free suballocation, merges it with the following one, which must also be free.
5403  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5404  // Releases given suballocation, making it free.
5405  // Merges it with adjacent free suballocations if applicable.
5406  // Returns iterator to new free suballocation at this place.
5407  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5408  // Given a free suballocation, inserts it into the sorted list
5409  // m_FreeSuballocationsBySize if it qualifies (is large enough).
5410  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5411  // Given a free suballocation, removes it from the sorted list
5412  // m_FreeSuballocationsBySize if it qualifies.
5413  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5414 };
5415 
5416 /*
5417 Allocations and their references in internal data structure look like this:
5418 
5419 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5420 
5421  0 +-------+
5422  | |
5423  | |
5424  | |
5425  +-------+
5426  | Alloc | 1st[m_1stNullItemsBeginCount]
5427  +-------+
5428  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5429  +-------+
5430  | ... |
5431  +-------+
5432  | Alloc | 1st[1st.size() - 1]
5433  +-------+
5434  | |
5435  | |
5436  | |
5437 GetSize() +-------+
5438 
5439 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5440 
5441  0 +-------+
5442  | Alloc | 2nd[0]
5443  +-------+
5444  | Alloc | 2nd[1]
5445  +-------+
5446  | ... |
5447  +-------+
5448  | Alloc | 2nd[2nd.size() - 1]
5449  +-------+
5450  | |
5451  | |
5452  | |
5453  +-------+
5454  | Alloc | 1st[m_1stNullItemsBeginCount]
5455  +-------+
5456  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5457  +-------+
5458  | ... |
5459  +-------+
5460  | Alloc | 1st[1st.size() - 1]
5461  +-------+
5462  | |
5463 GetSize() +-------+
5464 
5465 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5466 
5467  0 +-------+
5468  | |
5469  | |
5470  | |
5471  +-------+
5472  | Alloc | 1st[m_1stNullItemsBeginCount]
5473  +-------+
5474  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5475  +-------+
5476  | ... |
5477  +-------+
5478  | Alloc | 1st[1st.size() - 1]
5479  +-------+
5480  | |
5481  | |
5482  | |
5483  +-------+
5484  | Alloc | 2nd[2nd.size() - 1]
5485  +-------+
5486  | ... |
5487  +-------+
5488  | Alloc | 2nd[1]
5489  +-------+
5490  | Alloc | 2nd[0]
5491 GetSize() +-------+
5492 
5493 */
5494 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5495 {
5496  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5497 public:
5498  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5499  virtual ~VmaBlockMetadata_Linear();
5500  virtual void Init(VkDeviceSize size);
5501 
5502  virtual bool Validate() const;
5503  virtual size_t GetAllocationCount() const;
5504  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5505  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5506  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5507 
5508  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5509  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5510 
5511 #if VMA_STATS_STRING_ENABLED
5512  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5513 #endif
5514 
5515  virtual bool CreateAllocationRequest(
5516  uint32_t currentFrameIndex,
5517  uint32_t frameInUseCount,
5518  VkDeviceSize bufferImageGranularity,
5519  VkDeviceSize allocSize,
5520  VkDeviceSize allocAlignment,
5521  bool upperAddress,
5522  VmaSuballocationType allocType,
5523  bool canMakeOtherLost,
5524  uint32_t strategy,
5525  VmaAllocationRequest* pAllocationRequest);
5526 
5527  virtual bool MakeRequestedAllocationsLost(
5528  uint32_t currentFrameIndex,
5529  uint32_t frameInUseCount,
5530  VmaAllocationRequest* pAllocationRequest);
5531 
5532  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5533 
5534  virtual VkResult CheckCorruption(const void* pBlockData);
5535 
5536  virtual void Alloc(
5537  const VmaAllocationRequest& request,
5538  VmaSuballocationType type,
5539  VkDeviceSize allocSize,
5540  VmaAllocation hAllocation);
5541 
5542  virtual void Free(const VmaAllocation allocation);
5543  virtual void FreeAtOffset(VkDeviceSize offset);
5544 
5545 private:
5546  /*
5547  There are two suballocation vectors, used in a ping-pong fashion.
5548  The one with index m_1stVectorIndex is called 1st.
5549  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5550  2nd can be non-empty only when 1st is not empty.
5551  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5552  */
5553  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5554 
5555  enum SECOND_VECTOR_MODE
5556  {
5557  SECOND_VECTOR_EMPTY,
5558  /*
5559  Suballocations in the 2nd vector are created later than the ones in the 1st, but
5560  they all have smaller offsets.
5561  */
5562  SECOND_VECTOR_RING_BUFFER,
5563  /*
5564  Suballocations in 2nd vector are upper side of double stack.
5565  They all have offsets higher than those in 1st vector.
5566  Top of this stack means smaller offsets, but higher indices in this vector.
5567  */
5568  SECOND_VECTOR_DOUBLE_STACK,
5569  };
5570 
5571  VkDeviceSize m_SumFreeSize;
5572  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5573  uint32_t m_1stVectorIndex;
5574  SECOND_VECTOR_MODE m_2ndVectorMode;
5575 
5576  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5577  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5578  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5579  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5580 
5581  // Number of items in 1st vector with hAllocation = null at the beginning.
5582  size_t m_1stNullItemsBeginCount;
5583  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5584  size_t m_1stNullItemsMiddleCount;
5585  // Number of items in 2nd vector with hAllocation = null.
5586  size_t m_2ndNullItemsCount;
5587 
5588  bool ShouldCompact1st() const;
5589  void CleanupAfterFree();
5590 
5591  bool CreateAllocationRequest_LowerAddress(
5592  uint32_t currentFrameIndex,
5593  uint32_t frameInUseCount,
5594  VkDeviceSize bufferImageGranularity,
5595  VkDeviceSize allocSize,
5596  VkDeviceSize allocAlignment,
5597  VmaSuballocationType allocType,
5598  bool canMakeOtherLost,
5599  uint32_t strategy,
5600  VmaAllocationRequest* pAllocationRequest);
5601  bool CreateAllocationRequest_UpperAddress(
5602  uint32_t currentFrameIndex,
5603  uint32_t frameInUseCount,
5604  VkDeviceSize bufferImageGranularity,
5605  VkDeviceSize allocSize,
5606  VkDeviceSize allocAlignment,
5607  VmaSuballocationType allocType,
5608  bool canMakeOtherLost,
5609  uint32_t strategy,
5610  VmaAllocationRequest* pAllocationRequest);
5611 };
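/*
Illustrative example (not part of the library): the 1st/2nd vectors above swap
roles by flipping a single bit instead of copying data. If m_1stVectorIndex ==
0, AccessSuballocations1st() returns m_Suballocations0; after a hypothetical
m_1stVectorIndex ^= 1, it returns m_Suballocations1 instead. This O(1) swap is
what lets the ring-buffer mode promote the 2nd vector to 1st once the old 1st
drains, without moving any suballocation records.
*/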
5612 
5613 /*
5614 - GetSize() is the original size of allocated memory block.
5615 - m_UsableSize is this size aligned down to a power of two.
5616  All allocations and calculations happen relative to m_UsableSize.
5617 - GetUnusableSize() is the difference between them.
5618  It is reported as a separate, unused range, not available for allocations.
5619 
5620 Node at level 0 has size = m_UsableSize.
5621 Each subsequent level contains nodes half the size of those on the previous level.
5622 m_LevelCount is the maximum number of levels to use in the current object.
5623 */
5624 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5625 {
5626  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5627 public:
5628  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5629  virtual ~VmaBlockMetadata_Buddy();
5630  virtual void Init(VkDeviceSize size);
5631 
5632  virtual bool Validate() const;
5633  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5634  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5635  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5636  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5637 
5638  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5639  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5640 
5641 #if VMA_STATS_STRING_ENABLED
5642  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5643 #endif
5644 
5645  virtual bool CreateAllocationRequest(
5646  uint32_t currentFrameIndex,
5647  uint32_t frameInUseCount,
5648  VkDeviceSize bufferImageGranularity,
5649  VkDeviceSize allocSize,
5650  VkDeviceSize allocAlignment,
5651  bool upperAddress,
5652  VmaSuballocationType allocType,
5653  bool canMakeOtherLost,
5654  uint32_t strategy,
5655  VmaAllocationRequest* pAllocationRequest);
5656 
5657  virtual bool MakeRequestedAllocationsLost(
5658  uint32_t currentFrameIndex,
5659  uint32_t frameInUseCount,
5660  VmaAllocationRequest* pAllocationRequest);
5661 
5662  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5663 
5664  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5665 
5666  virtual void Alloc(
5667  const VmaAllocationRequest& request,
5668  VmaSuballocationType type,
5669  VkDeviceSize allocSize,
5670  VmaAllocation hAllocation);
5671 
5672  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5673  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5674 
5675 private:
5676  static const VkDeviceSize MIN_NODE_SIZE = 32;
5677  static const size_t MAX_LEVELS = 30;
5678 
5679  struct ValidationContext
5680  {
5681  size_t calculatedAllocationCount;
5682  size_t calculatedFreeCount;
5683  VkDeviceSize calculatedSumFreeSize;
5684 
5685  ValidationContext() :
5686  calculatedAllocationCount(0),
5687  calculatedFreeCount(0),
5688  calculatedSumFreeSize(0) { }
5689  };
5690 
5691  struct Node
5692  {
5693  VkDeviceSize offset;
5694  enum TYPE
5695  {
5696  TYPE_FREE,
5697  TYPE_ALLOCATION,
5698  TYPE_SPLIT,
5699  TYPE_COUNT
5700  } type;
5701  Node* parent;
5702  Node* buddy;
5703 
5704  union
5705  {
5706  struct
5707  {
5708  Node* prev;
5709  Node* next;
5710  } free;
5711  struct
5712  {
5713  VmaAllocation alloc;
5714  } allocation;
5715  struct
5716  {
5717  Node* leftChild;
5718  } split;
5719  };
5720  };
5721 
5722  // Size of the memory block aligned down to a power of two.
5723  VkDeviceSize m_UsableSize;
5724  uint32_t m_LevelCount;
5725 
5726  Node* m_Root;
5727  struct {
5728  Node* front;
5729  Node* back;
5730  } m_FreeList[MAX_LEVELS];
5731  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5732  size_t m_AllocationCount;
5733  // Number of nodes in the tree with type == TYPE_FREE.
5734  size_t m_FreeCount;
5735  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5736  VkDeviceSize m_SumFreeSize;
5737 
5738  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5739  void DeleteNode(Node* node);
5740  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5741  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5742  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5743  // Alloc passed just for validation. Can be null.
5744  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5745  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5746  // Adds node to the front of FreeList at given level.
5747  // node->type must be FREE.
5748  // node->free.prev, next can be undefined.
5749  void AddToFreeListFront(uint32_t level, Node* node);
5750  // Removes node from FreeList at given level.
5751  // node->type must be FREE.
5752  // node->free.prev, next stay untouched.
5753  void RemoveFromFreeList(uint32_t level, Node* node);
5754 
5755 #if VMA_STATS_STRING_ENABLED
5756  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5757 #endif
5758 };
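/*
Illustrative example (not part of the library): buddy level arithmetic. For a
block of 10 MiB, m_UsableSize is aligned down to 8 MiB and GetUnusableSize()
reports the remaining 2 MiB as a permanently unused range. Then
LevelToNodeSize(0) == 8 MiB, LevelToNodeSize(1) == 4 MiB, and so on, halving
per level; a 600 KiB request would land in a 1 MiB node at level 3 - the
smallest node that still fits it (subject to MIN_NODE_SIZE and m_LevelCount).
*/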
5759 
5760 /*
5761 Represents a single block of device memory (`VkDeviceMemory`) with all the
5762 data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
5763 
5764 Thread-safety: This class must be externally synchronized.
5765 */
5766 class VmaDeviceMemoryBlock
5767 {
5768  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5769 public:
5770  VmaBlockMetadata* m_pMetadata;
5771 
5772  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5773 
5774  ~VmaDeviceMemoryBlock()
5775  {
5776  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5777  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5778  }
5779 
5780  // Always call after construction.
5781  void Init(
5782  VmaAllocator hAllocator,
5783  VmaPool hParentPool,
5784  uint32_t newMemoryTypeIndex,
5785  VkDeviceMemory newMemory,
5786  VkDeviceSize newSize,
5787  uint32_t id,
5788  uint32_t algorithm);
5789  // Always call before destruction.
5790  void Destroy(VmaAllocator allocator);
5791 
5792  VmaPool GetParentPool() const { return m_hParentPool; }
5793  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5794  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5795  uint32_t GetId() const { return m_Id; }
5796  void* GetMappedData() const { return m_pMappedData; }
5797 
5798  // Validates all data structures inside this object. If not valid, returns false.
5799  bool Validate() const;
5800 
5801  VkResult CheckCorruption(VmaAllocator hAllocator);
5802 
5803  // ppData can be null.
5804  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5805  void Unmap(VmaAllocator hAllocator, uint32_t count);
5806 
5807  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5808  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5809 
5810  VkResult BindBufferMemory(
5811  const VmaAllocator hAllocator,
5812  const VmaAllocation hAllocation,
5813  VkBuffer hBuffer);
5814  VkResult BindImageMemory(
5815  const VmaAllocator hAllocator,
5816  const VmaAllocation hAllocation,
5817  VkImage hImage);
5818 
5819 private:
5820  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
5821  uint32_t m_MemoryTypeIndex;
5822  uint32_t m_Id;
5823  VkDeviceMemory m_hMemory;
5824 
5825  /*
5826  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5827  Also protects m_MapCount, m_pMappedData.
5828  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5829  */
5830  VMA_MUTEX m_Mutex;
5831  uint32_t m_MapCount;
5832  void* m_pMappedData;
5833 };
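/*
Illustrative sketch (not part of the library): Map()/Unmap() on the block are
reference-counted, so vkMapMemory is expected to be called only on the 0 -> 1
transition and vkUnmapMemory only when the count drops back to 0, under
m_Mutex:

    void* pData = VMA_NULL;
    VkResult res = pBlock->Map(hAllocator, 1, &pData); // count 0 -> 1: vkMapMemory
    void* pData2 = VMA_NULL;
    res = pBlock->Map(hAllocator, 1, &pData2);         // count 1 -> 2: reuses mapping
    pBlock->Unmap(hAllocator, 1);                      // count 2 -> 1: stays mapped
    pBlock->Unmap(hAllocator, 1);                      // count 1 -> 0: vkUnmapMemory
*/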
5834 
5835 struct VmaPointerLess
5836 {
5837  bool operator()(const void* lhs, const void* rhs) const
5838  {
5839  return lhs < rhs;
5840  }
5841 };
5842 
5843 struct VmaDefragmentationMove
5844 {
5845  size_t srcBlockIndex;
5846  size_t dstBlockIndex;
5847  VkDeviceSize srcOffset;
5848  VkDeviceSize dstOffset;
5849  VkDeviceSize size;
5850 };
5851 
5852 class VmaDefragmentationAlgorithm;
5853 
5854 /*
5855 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5856 Vulkan memory type.
5857 
5858 Synchronized internally with a mutex.
5859 */
5860 struct VmaBlockVector
5861 {
5862  VMA_CLASS_NO_COPY(VmaBlockVector)
5863 public:
5864  VmaBlockVector(
5865  VmaAllocator hAllocator,
5866  VmaPool hParentPool,
5867  uint32_t memoryTypeIndex,
5868  VkDeviceSize preferredBlockSize,
5869  size_t minBlockCount,
5870  size_t maxBlockCount,
5871  VkDeviceSize bufferImageGranularity,
5872  uint32_t frameInUseCount,
5873  bool isCustomPool,
5874  bool explicitBlockSize,
5875  uint32_t algorithm);
5876  ~VmaBlockVector();
5877 
5878  VkResult CreateMinBlocks();
5879 
5880  VmaPool GetParentPool() const { return m_hParentPool; }
5881  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5882  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5883  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5884  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5885  uint32_t GetAlgorithm() const { return m_Algorithm; }
5886 
5887  void GetPoolStats(VmaPoolStats* pStats);
5888 
5889  bool IsEmpty() const { return m_Blocks.empty(); }
5890  bool IsCorruptionDetectionEnabled() const;
5891 
5892  VkResult Allocate(
5893  uint32_t currentFrameIndex,
5894  VkDeviceSize size,
5895  VkDeviceSize alignment,
5896  const VmaAllocationCreateInfo& createInfo,
5897  VmaSuballocationType suballocType,
5898  size_t allocationCount,
5899  VmaAllocation* pAllocations);
5900 
5901  void Free(
5902  VmaAllocation hAllocation);
5903 
5904  // Adds statistics of this BlockVector to pStats.
5905  void AddStats(VmaStats* pStats);
5906 
5907 #if VMA_STATS_STRING_ENABLED
5908  void PrintDetailedMap(class VmaJsonWriter& json);
5909 #endif
5910 
5911  void MakePoolAllocationsLost(
5912  uint32_t currentFrameIndex,
5913  size_t* pLostAllocationCount);
5914  VkResult CheckCorruption();
5915 
5916  // Saves results in pCtx->res.
5917  void Defragment(
5918  class VmaBlockVectorDefragmentationContext* pCtx,
5919  VmaDefragmentationStats* pStats,
5920  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5921  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5922  VkCommandBuffer commandBuffer);
5923  void DefragmentationEnd(
5924  class VmaBlockVectorDefragmentationContext* pCtx,
5925  VmaDefragmentationStats* pStats);
5926 
5927  ////////////////////////////////////////////////////////////////////////////////
5928  // To be used only while the m_Mutex is locked. Used during defragmentation.
5929 
5930  size_t GetBlockCount() const { return m_Blocks.size(); }
5931  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5932  size_t CalcAllocationCount() const;
5933  bool IsBufferImageGranularityConflictPossible() const;
5934 
5935 private:
5936  friend class VmaDefragmentationAlgorithm_Generic;
5937 
5938  const VmaAllocator m_hAllocator;
5939  const VmaPool m_hParentPool;
5940  const uint32_t m_MemoryTypeIndex;
5941  const VkDeviceSize m_PreferredBlockSize;
5942  const size_t m_MinBlockCount;
5943  const size_t m_MaxBlockCount;
5944  const VkDeviceSize m_BufferImageGranularity;
5945  const uint32_t m_FrameInUseCount;
5946  const bool m_IsCustomPool;
5947  const bool m_ExplicitBlockSize;
5948  const uint32_t m_Algorithm;
5949  /* There can be at most one block that is completely empty - a
5950  hysteresis to avoid the pessimistic case of alternating creation and destruction
5951  of a VkDeviceMemory block. */
5952  bool m_HasEmptyBlock;
5953  VMA_RW_MUTEX m_Mutex;
5954  // Incrementally sorted by sumFreeSize, ascending.
5955  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5956  uint32_t m_NextBlockId;
5957 
5958  VkDeviceSize CalcMaxBlockSize() const;
5959 
5960  // Finds and removes given block from vector.
5961  void Remove(VmaDeviceMemoryBlock* pBlock);
5962 
5963  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5964  // after this call.
5965  void IncrementallySortBlocks();
5966 
5967  VkResult AllocatePage(
5968  uint32_t currentFrameIndex,
5969  VkDeviceSize size,
5970  VkDeviceSize alignment,
5971  const VmaAllocationCreateInfo& createInfo,
5972  VmaSuballocationType suballocType,
5973  VmaAllocation* pAllocation);
5974 
5975  // To be used only without CAN_MAKE_OTHER_LOST flag.
5976  VkResult AllocateFromBlock(
5977  VmaDeviceMemoryBlock* pBlock,
5978  uint32_t currentFrameIndex,
5979  VkDeviceSize size,
5980  VkDeviceSize alignment,
5981  VmaAllocationCreateFlags allocFlags,
5982  void* pUserData,
5983  VmaSuballocationType suballocType,
5984  uint32_t strategy,
5985  VmaAllocation* pAllocation);
5986 
5987  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5988 
5989  // Saves result to pCtx->res.
5990  void ApplyDefragmentationMovesCpu(
5991  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5992  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5993  // Saves result to pCtx->res.
5994  void ApplyDefragmentationMovesGpu(
5995  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5996  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5997  VkCommandBuffer commandBuffer);
5998 
5999  /*
6000  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6001  - updated with new data.
6002  */
6003  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6004 };
6005 
6006 struct VmaPool_T
6007 {
6008  VMA_CLASS_NO_COPY(VmaPool_T)
6009 public:
6010  VmaBlockVector m_BlockVector;
6011 
6012  VmaPool_T(
6013  VmaAllocator hAllocator,
6014  const VmaPoolCreateInfo& createInfo,
6015  VkDeviceSize preferredBlockSize);
6016  ~VmaPool_T();
6017 
6018  uint32_t GetId() const { return m_Id; }
6019  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6020 
6021 #if VMA_STATS_STRING_ENABLED
6022  //void PrintDetailedMap(class VmaStringBuilder& sb);
6023 #endif
6024 
6025 private:
6026  uint32_t m_Id;
6027 };
6028 
6029 /*
6030 Performs defragmentation:
6031 
6032 - Updates `pBlockVector->m_pMetadata`.
6033 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6034 - Does not move actual data, only returns requested moves as `moves`.
6035 */
6036 class VmaDefragmentationAlgorithm
6037 {
6038  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6039 public:
6040  VmaDefragmentationAlgorithm(
6041  VmaAllocator hAllocator,
6042  VmaBlockVector* pBlockVector,
6043  uint32_t currentFrameIndex) :
6044  m_hAllocator(hAllocator),
6045  m_pBlockVector(pBlockVector),
6046  m_CurrentFrameIndex(currentFrameIndex)
6047  {
6048  }
6049  virtual ~VmaDefragmentationAlgorithm()
6050  {
6051  }
6052 
6053  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6054  virtual void AddAll() = 0;
6055 
6056  virtual VkResult Defragment(
6057  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6058  VkDeviceSize maxBytesToMove,
6059  uint32_t maxAllocationsToMove) = 0;
6060 
6061  virtual VkDeviceSize GetBytesMoved() const = 0;
6062  virtual uint32_t GetAllocationsMoved() const = 0;
6063 
6064 protected:
6065  VmaAllocator const m_hAllocator;
6066  VmaBlockVector* const m_pBlockVector;
6067  const uint32_t m_CurrentFrameIndex;
6068 
6069  struct AllocationInfo
6070  {
6071  VmaAllocation m_hAllocation;
6072  VkBool32* m_pChanged;
6073 
6074  AllocationInfo() :
6075  m_hAllocation(VK_NULL_HANDLE),
6076  m_pChanged(VMA_NULL)
6077  {
6078  }
6079  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6080  m_hAllocation(hAlloc),
6081  m_pChanged(pChanged)
6082  {
6083  }
6084  };
6085 };
6086 
6087 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6088 {
6089  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6090 public:
6091  VmaDefragmentationAlgorithm_Generic(
6092  VmaAllocator hAllocator,
6093  VmaBlockVector* pBlockVector,
6094  uint32_t currentFrameIndex,
6095  bool overlappingMoveSupported);
6096  virtual ~VmaDefragmentationAlgorithm_Generic();
6097 
6098  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6099  virtual void AddAll() { m_AllAllocations = true; }
6100 
6101  virtual VkResult Defragment(
6102  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6103  VkDeviceSize maxBytesToMove,
6104  uint32_t maxAllocationsToMove);
6105 
6106  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6107  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6108 
6109 private:
6110  uint32_t m_AllocationCount;
6111  bool m_AllAllocations;
6112 
6113  VkDeviceSize m_BytesMoved;
6114  uint32_t m_AllocationsMoved;
6115 
6116  struct AllocationInfoSizeGreater
6117  {
6118  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6119  {
6120  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6121  }
6122  };
6123 
6124  struct AllocationInfoOffsetGreater
6125  {
6126  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6127  {
6128  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6129  }
6130  };
6131 
6132  struct BlockInfo
6133  {
6134  size_t m_OriginalBlockIndex;
6135  VmaDeviceMemoryBlock* m_pBlock;
6136  bool m_HasNonMovableAllocations;
6137  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6138 
6139  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6140  m_OriginalBlockIndex(SIZE_MAX),
6141  m_pBlock(VMA_NULL),
6142  m_HasNonMovableAllocations(true),
6143  m_Allocations(pAllocationCallbacks)
6144  {
6145  }
6146 
6147  void CalcHasNonMovableAllocations()
6148  {
6149  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6150  const size_t defragmentAllocCount = m_Allocations.size();
6151  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6152  }
6153 
6154  void SortAllocationsBySizeDescending()
6155  {
6156  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6157  }
6158 
6159  void SortAllocationsByOffsetDescending()
6160  {
6161  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6162  }
6163  };
6164 
6165  struct BlockPointerLess
6166  {
6167  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6168  {
6169  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6170  }
6171  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6172  {
6173  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6174  }
6175  };
6176 
6177  // 1. Blocks with some non-movable allocations go first.
6178  // 2. Blocks with smaller sumFreeSize go first.
6179  struct BlockInfoCompareMoveDestination
6180  {
6181  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6182  {
6183  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6184  {
6185  return true;
6186  }
6187  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6188  {
6189  return false;
6190  }
6191  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6192  {
6193  return true;
6194  }
6195  return false;
6196  }
6197  };
6198 
6199  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6200  BlockInfoVector m_Blocks;
6201 
6202  VkResult DefragmentRound(
6203  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6204  VkDeviceSize maxBytesToMove,
6205  uint32_t maxAllocationsToMove);
6206 
6207  size_t CalcBlocksWithNonMovableCount() const;
6208 
6209  static bool MoveMakesSense(
6210  size_t dstBlockIndex, VkDeviceSize dstOffset,
6211  size_t srcBlockIndex, VkDeviceSize srcOffset);
6212 };
6213 
6214 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6215 {
6216  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6217 public:
6218  VmaDefragmentationAlgorithm_Fast(
6219  VmaAllocator hAllocator,
6220  VmaBlockVector* pBlockVector,
6221  uint32_t currentFrameIndex,
6222  bool overlappingMoveSupported);
6223  virtual ~VmaDefragmentationAlgorithm_Fast();
6224 
6225  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6226  virtual void AddAll() { m_AllAllocations = true; }
6227 
6228  virtual VkResult Defragment(
6229  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6230  VkDeviceSize maxBytesToMove,
6231  uint32_t maxAllocationsToMove);
6232 
6233  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6234  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6235 
6236 private:
6237  struct BlockInfo
6238  {
6239  size_t origBlockIndex;
6240  };
6241 
6242  class FreeSpaceDatabase
6243  {
6244  public:
6245  FreeSpaceDatabase()
6246  {
6247  FreeSpace s = {};
6248  s.blockInfoIndex = SIZE_MAX;
6249  for(size_t i = 0; i < MAX_COUNT; ++i)
6250  {
6251  m_FreeSpaces[i] = s;
6252  }
6253  }
6254 
6255  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6256  {
6257  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6258  {
6259  return;
6260  }
6261 
6262  // Find the first invalid entry or, failing that, the smallest one.
6263  size_t bestIndex = SIZE_MAX;
6264  for(size_t i = 0; i < MAX_COUNT; ++i)
6265  {
6266  // Empty structure.
6267  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6268  {
6269  bestIndex = i;
6270  break;
6271  }
6272  if(m_FreeSpaces[i].size < size &&
6273  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6274  {
6275  bestIndex = i;
6276  }
6277  }
6278 
6279  if(bestIndex != SIZE_MAX)
6280  {
6281  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6282  m_FreeSpaces[bestIndex].offset = offset;
6283  m_FreeSpaces[bestIndex].size = size;
6284  }
6285  }
6286 
6287  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6288  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6289  {
6290  size_t bestIndex = SIZE_MAX;
6291  VkDeviceSize bestFreeSpaceAfter = 0;
6292  for(size_t i = 0; i < MAX_COUNT; ++i)
6293  {
6294  // Structure is valid.
6295  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6296  {
6297  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6298  // Allocation fits into this structure.
6299  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6300  {
6301  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6302  (dstOffset + size);
6303  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6304  {
6305  bestIndex = i;
6306  bestFreeSpaceAfter = freeSpaceAfter;
6307  }
6308  }
6309  }
6310  }
6311 
6312  if(bestIndex != SIZE_MAX)
6313  {
6314  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6315  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6316 
6317  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6318  {
6319  // Leave this structure for remaining empty space.
6320  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6321  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6322  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6323  }
6324  else
6325  {
6326  // This structure becomes invalid.
6327  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6328  }
6329 
6330  return true;
6331  }
6332 
6333  return false;
6334  }
6335 
6336  private:
6337  static const size_t MAX_COUNT = 4;
6338 
6339  struct FreeSpace
6340  {
6341  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6342  VkDeviceSize offset;
6343  VkDeviceSize size;
6344  } m_FreeSpaces[MAX_COUNT];
6345  };
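 /*
 Illustrative example (not part of the library): Fetch() accounts for alignment
 padding when testing fit. For a registered free space at offset 100 with size
 64, a request of size 48 and alignment 16 gives dstOffset = VmaAlignUp(100, 16)
 = 112, and 112 + 48 = 160 <= 100 + 64 = 164, so the space qualifies with
 freeSpaceAfter = 4. Assuming VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER is
 larger than 4, the entry is then invalidated rather than kept for the 4
 leftover bytes.
 */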
6346 
6347  const bool m_OverlappingMoveSupported;
6348 
6349  uint32_t m_AllocationCount;
6350  bool m_AllAllocations;
6351 
6352  VkDeviceSize m_BytesMoved;
6353  uint32_t m_AllocationsMoved;
6354 
6355  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6356 
6357  void PreprocessMetadata();
6358  void PostprocessMetadata();
6359  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6360 };
6361 
6362 struct VmaBlockDefragmentationContext
6363 {
6364  enum BLOCK_FLAG
6365  {
6366  BLOCK_FLAG_USED = 0x00000001,
6367  };
6368  uint32_t flags;
6369  VkBuffer hBuffer;
6370 
6371  VmaBlockDefragmentationContext() :
6372  flags(0),
6373  hBuffer(VK_NULL_HANDLE)
6374  {
6375  }
6376 };
6377 
6378 class VmaBlockVectorDefragmentationContext
6379 {
6380  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6381 public:
6382  VkResult res;
6383  bool mutexLocked;
6384  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6385 
6386  VmaBlockVectorDefragmentationContext(
6387  VmaAllocator hAllocator,
6388  VmaPool hCustomPool, // Optional.
6389  VmaBlockVector* pBlockVector,
6390  uint32_t currFrameIndex,
6391  uint32_t flags);
6392  ~VmaBlockVectorDefragmentationContext();
6393 
6394  VmaPool GetCustomPool() const { return m_hCustomPool; }
6395  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6396  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6397 
6398  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6399  void AddAll() { m_AllAllocations = true; }
6400 
6401  void Begin(bool overlappingMoveSupported);
6402 
6403 private:
6404  const VmaAllocator m_hAllocator;
6405  // Null if not from custom pool.
6406  const VmaPool m_hCustomPool;
6407  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6408  VmaBlockVector* const m_pBlockVector;
6409  const uint32_t m_CurrFrameIndex;
6410  const uint32_t m_AlgorithmFlags;
6411  // Owner of this object.
6412  VmaDefragmentationAlgorithm* m_pAlgorithm;
6413 
6414  struct AllocInfo
6415  {
6416  VmaAllocation hAlloc;
6417  VkBool32* pChanged;
6418  };
6419  // Used between constructor and Begin.
6420  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6421  bool m_AllAllocations;
6422 };
6423 
6424 struct VmaDefragmentationContext_T
6425 {
6426 private:
6427  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6428 public:
6429  VmaDefragmentationContext_T(
6430  VmaAllocator hAllocator,
6431  uint32_t currFrameIndex,
6432  uint32_t flags,
6433  VmaDefragmentationStats* pStats);
6434  ~VmaDefragmentationContext_T();
6435 
6436  void AddPools(uint32_t poolCount, VmaPool* pPools);
6437  void AddAllocations(
6438  uint32_t allocationCount,
6439  VmaAllocation* pAllocations,
6440  VkBool32* pAllocationsChanged);
6441 
6442  /*
6443  Returns:
6444  - `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
6445  - `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
6446  - Negative value if an error occurred and the object can be destroyed immediately.
6447  */
6448  VkResult Defragment(
6449  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6450  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6451  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6452 
6453 private:
6454  const VmaAllocator m_hAllocator;
6455  const uint32_t m_CurrFrameIndex;
6456  const uint32_t m_Flags;
6457  VmaDefragmentationStats* const m_pStats;
6458  // Owner of these objects.
6459  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6460  // Owner of these objects.
6461  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6462 };
6463 
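
/*
Example: a minimal sketch (not part of the library) of how calling code might
act on the result of Defragment(), per the contract documented above. The
locals `ctx`, `stats`, and `cmdBuf` are hypothetical; passing VK_WHOLE_SIZE /
UINT32_MAX means "no limit" on bytes/allocations moved.

    VmaDefragmentationStats stats = {};
    VkResult res = ctx->Defragment(
        VK_WHOLE_SIZE, UINT32_MAX, // CPU limits
        VK_WHOLE_SIZE, UINT32_MAX, // GPU limits
        cmdBuf, &stats);
    if(res == VK_SUCCESS)
    {
        // Finished: the context can be destroyed immediately.
    }
    else if(res == VK_NOT_READY)
    {
        // Commands were recorded into cmdBuf: submit and complete them,
        // then call vmaDefragmentationEnd() before destroying the context.
    }
    else // res < 0
    {
        // Error: the context can still be destroyed immediately.
    }
*/
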
6464 #if VMA_RECORDING_ENABLED
6465 
6466 class VmaRecorder
6467 {
6468 public:
6469  VmaRecorder();
6470  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6471  void WriteConfiguration(
6472  const VkPhysicalDeviceProperties& devProps,
6473  const VkPhysicalDeviceMemoryProperties& memProps,
6474  bool dedicatedAllocationExtensionEnabled);
6475  ~VmaRecorder();
6476 
6477  void RecordCreateAllocator(uint32_t frameIndex);
6478  void RecordDestroyAllocator(uint32_t frameIndex);
6479  void RecordCreatePool(uint32_t frameIndex,
6480  const VmaPoolCreateInfo& createInfo,
6481  VmaPool pool);
6482  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6483  void RecordAllocateMemory(uint32_t frameIndex,
6484  const VkMemoryRequirements& vkMemReq,
6485  const VmaAllocationCreateInfo& createInfo,
6486  VmaAllocation allocation);
6487  void RecordAllocateMemoryPages(uint32_t frameIndex,
6488  const VkMemoryRequirements& vkMemReq,
6489  const VmaAllocationCreateInfo& createInfo,
6490  uint64_t allocationCount,
6491  const VmaAllocation* pAllocations);
6492  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6493  const VkMemoryRequirements& vkMemReq,
6494  bool requiresDedicatedAllocation,
6495  bool prefersDedicatedAllocation,
6496  const VmaAllocationCreateInfo& createInfo,
6497  VmaAllocation allocation);
6498  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6499  const VkMemoryRequirements& vkMemReq,
6500  bool requiresDedicatedAllocation,
6501  bool prefersDedicatedAllocation,
6502  const VmaAllocationCreateInfo& createInfo,
6503  VmaAllocation allocation);
6504  void RecordFreeMemory(uint32_t frameIndex,
6505  VmaAllocation allocation);
6506  void RecordFreeMemoryPages(uint32_t frameIndex,
6507  uint64_t allocationCount,
6508  const VmaAllocation* pAllocations);
6509  void RecordResizeAllocation(
6510  uint32_t frameIndex,
6511  VmaAllocation allocation,
6512  VkDeviceSize newSize);
6513  void RecordSetAllocationUserData(uint32_t frameIndex,
6514  VmaAllocation allocation,
6515  const void* pUserData);
6516  void RecordCreateLostAllocation(uint32_t frameIndex,
6517  VmaAllocation allocation);
6518  void RecordMapMemory(uint32_t frameIndex,
6519  VmaAllocation allocation);
6520  void RecordUnmapMemory(uint32_t frameIndex,
6521  VmaAllocation allocation);
6522  void RecordFlushAllocation(uint32_t frameIndex,
6523  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6524  void RecordInvalidateAllocation(uint32_t frameIndex,
6525  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6526  void RecordCreateBuffer(uint32_t frameIndex,
6527  const VkBufferCreateInfo& bufCreateInfo,
6528  const VmaAllocationCreateInfo& allocCreateInfo,
6529  VmaAllocation allocation);
6530  void RecordCreateImage(uint32_t frameIndex,
6531  const VkImageCreateInfo& imageCreateInfo,
6532  const VmaAllocationCreateInfo& allocCreateInfo,
6533  VmaAllocation allocation);
6534  void RecordDestroyBuffer(uint32_t frameIndex,
6535  VmaAllocation allocation);
6536  void RecordDestroyImage(uint32_t frameIndex,
6537  VmaAllocation allocation);
6538  void RecordTouchAllocation(uint32_t frameIndex,
6539  VmaAllocation allocation);
6540  void RecordGetAllocationInfo(uint32_t frameIndex,
6541  VmaAllocation allocation);
6542  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6543  VmaPool pool);
6544  void RecordDefragmentationBegin(uint32_t frameIndex,
6545  const VmaDefragmentationInfo2& info,
6546  VmaDefragmentationContext ctx);
6547  void RecordDefragmentationEnd(uint32_t frameIndex,
6548  VmaDefragmentationContext ctx);
6549 
6550 private:
6551  struct CallParams
6552  {
6553  uint32_t threadId;
6554  double time;
6555  };
6556 
6557  class UserDataString
6558  {
6559  public:
6560  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6561  const char* GetString() const { return m_Str; }
6562 
6563  private:
6564  char m_PtrStr[17];
6565  const char* m_Str;
6566  };
6567 
6568  bool m_UseMutex;
6569  VmaRecordFlags m_Flags;
6570  FILE* m_File;
6571  VMA_MUTEX m_FileMutex;
6572  int64_t m_Freq;
6573  int64_t m_StartCounter;
6574 
6575  void GetBasicParams(CallParams& outParams);
6576 
6577  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6578  template<typename T>
6579  void PrintPointerList(uint64_t count, const T* pItems)
6580  {
6581  if(count)
6582  {
6583  fprintf(m_File, "%p", pItems[0]);
6584  for(uint64_t i = 1; i < count; ++i)
6585  {
6586  fprintf(m_File, " %p", pItems[i]);
6587  }
6588  }
6589  }
6590 
6591  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6592  void Flush();
6593 };
6594 
6595 #endif // #if VMA_RECORDING_ENABLED
6596 
6597 /*
6598 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6599 */
6600 class VmaAllocationObjectAllocator
6601 {
6602  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6603 public:
6604  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6605 
6606  VmaAllocation Allocate();
6607  void Free(VmaAllocation hAlloc);
6608 
6609 private:
6610  VMA_MUTEX m_Mutex;
6611  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6612 };
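
/*
A plausible sketch of what Allocate()/Free() do (the actual definitions appear
further down in this file): serialize access to the underlying free-list
allocator with the mutex, assuming the VmaMutexLock RAII helper defined
earlier in this file.

    VmaAllocation VmaAllocationObjectAllocator::Allocate()
    {
        VmaMutexLock mutexLock(m_Mutex);
        return m_Allocator.Alloc();
    }

    void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    {
        VmaMutexLock mutexLock(m_Mutex);
        m_Allocator.Free(hAlloc);
    }
*/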
6613 
6614 // Main allocator object.
6615 struct VmaAllocator_T
6616 {
6617  VMA_CLASS_NO_COPY(VmaAllocator_T)
6618 public:
6619  bool m_UseMutex;
6620  bool m_UseKhrDedicatedAllocation;
6621  VkDevice m_hDevice;
6622  bool m_AllocationCallbacksSpecified;
6623  VkAllocationCallbacks m_AllocationCallbacks;
6624  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6625  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6626 
6627  // Number of bytes still available within the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
6628  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6629  VMA_MUTEX m_HeapSizeLimitMutex;
6630 
6631  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6632  VkPhysicalDeviceMemoryProperties m_MemProps;
6633 
6634  // Default pools.
6635  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6636 
6637  // Each vector is sorted by memory (handle value).
6638  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6639  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6640  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6641 
6642  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6643  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6644  ~VmaAllocator_T();
6645 
6646  const VkAllocationCallbacks* GetAllocationCallbacks() const
6647  {
6648  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6649  }
6650  const VmaVulkanFunctions& GetVulkanFunctions() const
6651  {
6652  return m_VulkanFunctions;
6653  }
6654 
6655  VkDeviceSize GetBufferImageGranularity() const
6656  {
6657  return VMA_MAX(
6658  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6659  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6660  }
6661 
6662  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6663  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6664 
6665  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6666  {
6667  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6668  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6669  }
6670  // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
6671  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6672  {
6673  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6674  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6675  }
6676  // Minimum alignment for all allocations in a specific memory type.
6677  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6678  {
6679  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6680  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6681  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6682  }
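
// Worked example: with nonCoherentAtomSize = 64 and VMA_DEBUG_ALIGNMENT = 1,
// allocations in a HOST_VISIBLE but non-HOST_COHERENT memory type get at least
// 64-byte alignment, so flush/invalidate ranges rounded to atom boundaries
// cannot overlap a neighboring allocation.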
6683 
6684  bool IsIntegratedGpu() const
6685  {
6686  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6687  }
6688 
6689 #if VMA_RECORDING_ENABLED
6690  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6691 #endif
6692 
6693  void GetBufferMemoryRequirements(
6694  VkBuffer hBuffer,
6695  VkMemoryRequirements& memReq,
6696  bool& requiresDedicatedAllocation,
6697  bool& prefersDedicatedAllocation) const;
6698  void GetImageMemoryRequirements(
6699  VkImage hImage,
6700  VkMemoryRequirements& memReq,
6701  bool& requiresDedicatedAllocation,
6702  bool& prefersDedicatedAllocation) const;
6703 
6704  // Main allocation function.
6705  VkResult AllocateMemory(
6706  const VkMemoryRequirements& vkMemReq,
6707  bool requiresDedicatedAllocation,
6708  bool prefersDedicatedAllocation,
6709  VkBuffer dedicatedBuffer,
6710  VkImage dedicatedImage,
6711  const VmaAllocationCreateInfo& createInfo,
6712  VmaSuballocationType suballocType,
6713  size_t allocationCount,
6714  VmaAllocation* pAllocations);
6715 
6716  // Main deallocation function.
6717  void FreeMemory(
6718  size_t allocationCount,
6719  const VmaAllocation* pAllocations);
6720 
6721  VkResult ResizeAllocation(
6722  const VmaAllocation alloc,
6723  VkDeviceSize newSize);
6724 
6725  void CalculateStats(VmaStats* pStats);
6726 
6727 #if VMA_STATS_STRING_ENABLED
6728  void PrintDetailedMap(class VmaJsonWriter& json);
6729 #endif
6730 
6731  VkResult DefragmentationBegin(
6732  const VmaDefragmentationInfo2& info,
6733  VmaDefragmentationStats* pStats,
6734  VmaDefragmentationContext* pContext);
6735  VkResult DefragmentationEnd(
6736  VmaDefragmentationContext context);
6737 
6738  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6739  bool TouchAllocation(VmaAllocation hAllocation);
6740 
6741  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6742  void DestroyPool(VmaPool pool);
6743  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6744 
6745  void SetCurrentFrameIndex(uint32_t frameIndex);
6746  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6747 
6748  void MakePoolAllocationsLost(
6749  VmaPool hPool,
6750  size_t* pLostAllocationCount);
6751  VkResult CheckPoolCorruption(VmaPool hPool);
6752  VkResult CheckCorruption(uint32_t memoryTypeBits);
6753 
6754  void CreateLostAllocation(VmaAllocation* pAllocation);
6755 
6756  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6757  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6758 
6759  VkResult Map(VmaAllocation hAllocation, void** ppData);
6760  void Unmap(VmaAllocation hAllocation);
6761 
6762  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6763  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6764 
6765  void FlushOrInvalidateAllocation(
6766  VmaAllocation hAllocation,
6767  VkDeviceSize offset, VkDeviceSize size,
6768  VMA_CACHE_OPERATION op);
6769 
6770  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6771 
6772 private:
6773  VkDeviceSize m_PreferredLargeHeapBlockSize;
6774 
6775  VkPhysicalDevice m_PhysicalDevice;
6776  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6777 
6778  VMA_RW_MUTEX m_PoolsMutex;
6779  // Protected by m_PoolsMutex. Sorted by pointer value.
6780  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6781  uint32_t m_NextPoolId;
6782 
6783  VmaVulkanFunctions m_VulkanFunctions;
6784 
6785 #if VMA_RECORDING_ENABLED
6786  VmaRecorder* m_pRecorder;
6787 #endif
6788 
6789  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6790 
6791  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6792 
6793  VkResult AllocateMemoryOfType(
6794  VkDeviceSize size,
6795  VkDeviceSize alignment,
6796  bool dedicatedAllocation,
6797  VkBuffer dedicatedBuffer,
6798  VkImage dedicatedImage,
6799  const VmaAllocationCreateInfo& createInfo,
6800  uint32_t memTypeIndex,
6801  VmaSuballocationType suballocType,
6802  size_t allocationCount,
6803  VmaAllocation* pAllocations);
6804 
6805  // Helper function only to be used inside AllocateDedicatedMemory.
6806  VkResult AllocateDedicatedMemoryPage(
6807  VkDeviceSize size,
6808  VmaSuballocationType suballocType,
6809  uint32_t memTypeIndex,
6810  const VkMemoryAllocateInfo& allocInfo,
6811  bool map,
6812  bool isUserDataString,
6813  void* pUserData,
6814  VmaAllocation* pAllocation);
6815 
6816  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6817  VkResult AllocateDedicatedMemory(
6818  VkDeviceSize size,
6819  VmaSuballocationType suballocType,
6820  uint32_t memTypeIndex,
6821  bool map,
6822  bool isUserDataString,
6823  void* pUserData,
6824  VkBuffer dedicatedBuffer,
6825  VkImage dedicatedImage,
6826  size_t allocationCount,
6827  VmaAllocation* pAllocations);
6828 
6829  // Frees and unregisters the given allocation, which must be a dedicated allocation.
6830  void FreeDedicatedMemory(VmaAllocation allocation);
6831 };
6832 
6833 ////////////////////////////////////////////////////////////////////////////////
6834 // Memory allocation #2 after VmaAllocator_T definition
6835 
6836 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6837 {
6838  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6839 }
6840 
6841 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6842 {
6843  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6844 }
6845 
6846 template<typename T>
6847 static T* VmaAllocate(VmaAllocator hAllocator)
6848 {
6849  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6850 }
6851 
6852 template<typename T>
6853 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6854 {
6855  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6856 }
6857 
6858 template<typename T>
6859 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6860 {
6861  if(ptr != VMA_NULL)
6862  {
6863  ptr->~T();
6864  VmaFree(hAllocator, ptr);
6865  }
6866 }
6867 
6868 template<typename T>
6869 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6870 {
6871  if(ptr != VMA_NULL)
6872  {
6873  for(size_t i = count; i--; )
6874  ptr[i].~T();
6875  VmaFree(hAllocator, ptr);
6876  }
6877 }
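
/*
Usage sketch: these helpers pair raw, callback-based allocation with explicit
construction/destruction. Elsewhere in this file the vma_new_array macro is
used for the allocate-and-construct half; the matching teardown is
vma_delete_array, e.g.:

    char* const str = vma_new_array(hAllocator, char, len + 1);
    // ... fill and use str ...
    vma_delete_array(hAllocator, str, len + 1);
*/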
6878 
6879 ////////////////////////////////////////////////////////////////////////////////
6880 // VmaStringBuilder
6881 
6882 #if VMA_STATS_STRING_ENABLED
6883 
6884 class VmaStringBuilder
6885 {
6886 public:
6887  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6888  size_t GetLength() const { return m_Data.size(); }
6889  const char* GetData() const { return m_Data.data(); }
6890 
6891  void Add(char ch) { m_Data.push_back(ch); }
6892  void Add(const char* pStr);
6893  void AddNewLine() { Add('\n'); }
6894  void AddNumber(uint32_t num);
6895  void AddNumber(uint64_t num);
6896  void AddPointer(const void* ptr);
6897 
6898 private:
6899  VmaVector< char, VmaStlAllocator<char> > m_Data;
6900 };
6901 
6902 void VmaStringBuilder::Add(const char* pStr)
6903 {
6904  const size_t strLen = strlen(pStr);
6905  if(strLen > 0)
6906  {
6907  const size_t oldCount = m_Data.size();
6908  m_Data.resize(oldCount + strLen);
6909  memcpy(m_Data.data() + oldCount, pStr, strLen);
6910  }
6911 }
6912 
6913 void VmaStringBuilder::AddNumber(uint32_t num)
6914 {
6915  char buf[11]; // Max uint32_t is 4294967295: 10 digits + terminating null.
6916  VmaUint32ToStr(buf, sizeof(buf), num);
6917  Add(buf);
6918 }
6919 
6920 void VmaStringBuilder::AddNumber(uint64_t num)
6921 {
6922  char buf[21]; // Max uint64_t is 18446744073709551615: 20 digits + terminating null.
6923  VmaUint64ToStr(buf, sizeof(buf), num);
6924  Add(buf);
6925 }
6926 
6927 void VmaStringBuilder::AddPointer(const void* ptr)
6928 {
6929  char buf[21]; // Fits "0x" + 16 hex digits + terminating null on 64-bit targets.
6930  VmaPtrToStr(buf, sizeof(buf), ptr);
6931  Add(buf);
6932 }
6933 
6934 #endif // #if VMA_STATS_STRING_ENABLED
6935 
6936 ////////////////////////////////////////////////////////////////////////////////
6937 // VmaJsonWriter
6938 
6939 #if VMA_STATS_STRING_ENABLED
6940 
6941 class VmaJsonWriter
6942 {
6943  VMA_CLASS_NO_COPY(VmaJsonWriter)
6944 public:
6945  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6946  ~VmaJsonWriter();
6947 
6948  void BeginObject(bool singleLine = false);
6949  void EndObject();
6950 
6951  void BeginArray(bool singleLine = false);
6952  void EndArray();
6953 
6954  void WriteString(const char* pStr);
6955  void BeginString(const char* pStr = VMA_NULL);
6956  void ContinueString(const char* pStr);
6957  void ContinueString(uint32_t n);
6958  void ContinueString(uint64_t n);
6959  void ContinueString_Pointer(const void* ptr);
6960  void EndString(const char* pStr = VMA_NULL);
6961 
6962  void WriteNumber(uint32_t n);
6963  void WriteNumber(uint64_t n);
6964  void WriteBool(bool b);
6965  void WriteNull();
6966 
6967 private:
6968  static const char* const INDENT;
6969 
6970  enum COLLECTION_TYPE
6971  {
6972  COLLECTION_TYPE_OBJECT,
6973  COLLECTION_TYPE_ARRAY,
6974  };
6975  struct StackItem
6976  {
6977  COLLECTION_TYPE type;
6978  uint32_t valueCount;
6979  bool singleLineMode;
6980  };
6981 
6982  VmaStringBuilder& m_SB;
6983  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6984  bool m_InsideString;
6985 
6986  void BeginValue(bool isString);
6987  void WriteIndent(bool oneLess = false);
6988 };
6989 
6990 const char* const VmaJsonWriter::INDENT = "  ";
6991 
6992 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6993  m_SB(sb),
6994  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6995  m_InsideString(false)
6996 {
6997 }
6998 
6999 VmaJsonWriter::~VmaJsonWriter()
7000 {
7001  VMA_ASSERT(!m_InsideString);
7002  VMA_ASSERT(m_Stack.empty());
7003 }
7004 
7005 void VmaJsonWriter::BeginObject(bool singleLine)
7006 {
7007  VMA_ASSERT(!m_InsideString);
7008 
7009  BeginValue(false);
7010  m_SB.Add('{');
7011 
7012  StackItem item;
7013  item.type = COLLECTION_TYPE_OBJECT;
7014  item.valueCount = 0;
7015  item.singleLineMode = singleLine;
7016  m_Stack.push_back(item);
7017 }
7018 
7019 void VmaJsonWriter::EndObject()
7020 {
7021  VMA_ASSERT(!m_InsideString);
7022 
7023  WriteIndent(true);
7024  m_SB.Add('}');
7025 
7026  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7027  m_Stack.pop_back();
7028 }
7029 
7030 void VmaJsonWriter::BeginArray(bool singleLine)
7031 {
7032  VMA_ASSERT(!m_InsideString);
7033 
7034  BeginValue(false);
7035  m_SB.Add('[');
7036 
7037  StackItem item;
7038  item.type = COLLECTION_TYPE_ARRAY;
7039  item.valueCount = 0;
7040  item.singleLineMode = singleLine;
7041  m_Stack.push_back(item);
7042 }
7043 
7044 void VmaJsonWriter::EndArray()
7045 {
7046  VMA_ASSERT(!m_InsideString);
7047 
7048  WriteIndent(true);
7049  m_SB.Add(']');
7050 
7051  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7052  m_Stack.pop_back();
7053 }
7054 
7055 void VmaJsonWriter::WriteString(const char* pStr)
7056 {
7057  BeginString(pStr);
7058  EndString();
7059 }
7060 
7061 void VmaJsonWriter::BeginString(const char* pStr)
7062 {
7063  VMA_ASSERT(!m_InsideString);
7064 
7065  BeginValue(true);
7066  m_SB.Add('"');
7067  m_InsideString = true;
7068  if(pStr != VMA_NULL && pStr[0] != '\0')
7069  {
7070  ContinueString(pStr);
7071  }
7072 }
7073 
7074 void VmaJsonWriter::ContinueString(const char* pStr)
7075 {
7076  VMA_ASSERT(m_InsideString);
7077 
7078  const size_t strLen = strlen(pStr);
7079  for(size_t i = 0; i < strLen; ++i)
7080  {
7081  char ch = pStr[i];
7082  if(ch == '\\')
7083  {
7084  m_SB.Add("\\\\");
7085  }
7086  else if(ch == '"')
7087  {
7088  m_SB.Add("\\\"");
7089  }
7090  else if(ch >= 32)
7091  {
7092  m_SB.Add(ch);
7093  }
7094  else switch(ch)
7095  {
7096  case '\b':
7097  m_SB.Add("\\b");
7098  break;
7099  case '\f':
7100  m_SB.Add("\\f");
7101  break;
7102  case '\n':
7103  m_SB.Add("\\n");
7104  break;
7105  case '\r':
7106  m_SB.Add("\\r");
7107  break;
7108  case '\t':
7109  m_SB.Add("\\t");
7110  break;
7111  default:
7112  VMA_ASSERT(0 && "Character not currently supported.");
7113  break;
7114  }
7115  }
7116 }
7117 
7118 void VmaJsonWriter::ContinueString(uint32_t n)
7119 {
7120  VMA_ASSERT(m_InsideString);
7121  m_SB.AddNumber(n);
7122 }
7123 
7124 void VmaJsonWriter::ContinueString(uint64_t n)
7125 {
7126  VMA_ASSERT(m_InsideString);
7127  m_SB.AddNumber(n);
7128 }
7129 
7130 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7131 {
7132  VMA_ASSERT(m_InsideString);
7133  m_SB.AddPointer(ptr);
7134 }
7135 
7136 void VmaJsonWriter::EndString(const char* pStr)
7137 {
7138  VMA_ASSERT(m_InsideString);
7139  if(pStr != VMA_NULL && pStr[0] != '\0')
7140  {
7141  ContinueString(pStr);
7142  }
7143  m_SB.Add('"');
7144  m_InsideString = false;
7145 }
7146 
7147 void VmaJsonWriter::WriteNumber(uint32_t n)
7148 {
7149  VMA_ASSERT(!m_InsideString);
7150  BeginValue(false);
7151  m_SB.AddNumber(n);
7152 }
7153 
7154 void VmaJsonWriter::WriteNumber(uint64_t n)
7155 {
7156  VMA_ASSERT(!m_InsideString);
7157  BeginValue(false);
7158  m_SB.AddNumber(n);
7159 }
7160 
7161 void VmaJsonWriter::WriteBool(bool b)
7162 {
7163  VMA_ASSERT(!m_InsideString);
7164  BeginValue(false);
7165  m_SB.Add(b ? "true" : "false");
7166 }
7167 
7168 void VmaJsonWriter::WriteNull()
7169 {
7170  VMA_ASSERT(!m_InsideString);
7171  BeginValue(false);
7172  m_SB.Add("null");
7173 }
7174 
7175 void VmaJsonWriter::BeginValue(bool isString)
7176 {
7177  if(!m_Stack.empty())
7178  {
7179  StackItem& currItem = m_Stack.back();
7180  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7181  currItem.valueCount % 2 == 0)
7182  {
7183  VMA_ASSERT(isString);
7184  }
7185 
7186  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7187  currItem.valueCount % 2 != 0)
7188  {
7189  m_SB.Add(": ");
7190  }
7191  else if(currItem.valueCount > 0)
7192  {
7193  m_SB.Add(", ");
7194  WriteIndent();
7195  }
7196  else
7197  {
7198  WriteIndent();
7199  }
7200  ++currItem.valueCount;
7201  }
7202 }
7203 
7204 void VmaJsonWriter::WriteIndent(bool oneLess)
7205 {
7206  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7207  {
7208  m_SB.AddNewLine();
7209 
7210  size_t count = m_Stack.size();
7211  if(count > 0 && oneLess)
7212  {
7213  --count;
7214  }
7215  for(size_t i = 0; i < count; ++i)
7216  {
7217  m_SB.Add(INDENT);
7218  }
7219  }
7220 }
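
/*
Usage sketch: inside an object, keys and values alternate, and BeginValue()
asserts that every even-numbered element (a key) is a string - that is the
`valueCount % 2` check above. For example:

    VmaStringBuilder sb(hAllocator); // hAllocator: some valid VmaAllocator
    {
        VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Count"); json.WriteNumber(42u);
        json.WriteString("Valid"); json.WriteBool(true);
        json.EndObject();
    } // Destructor asserts the stack is empty and no string is left open.
*/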
7221 
7222 #endif // #if VMA_STATS_STRING_ENABLED
7223 
7224 ////////////////////////////////////////////////////////////////////////////////
7225 
7226 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7227 {
7228  if(IsUserDataString())
7229  {
7230  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7231 
7232  FreeUserDataString(hAllocator);
7233 
7234  if(pUserData != VMA_NULL)
7235  {
7236  const char* const newStrSrc = (char*)pUserData;
7237  const size_t newStrLen = strlen(newStrSrc);
7238  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7239  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7240  m_pUserData = newStrDst;
7241  }
7242  }
7243  else
7244  {
7245  m_pUserData = pUserData;
7246  }
7247 }
7248 
7249 void VmaAllocation_T::ChangeBlockAllocation(
7250  VmaAllocator hAllocator,
7251  VmaDeviceMemoryBlock* block,
7252  VkDeviceSize offset)
7253 {
7254  VMA_ASSERT(block != VMA_NULL);
7255  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7256 
7257  // Move mapping reference counter from old block to new block.
7258  if(block != m_BlockAllocation.m_Block)
7259  {
7260  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7261  if(IsPersistentMap())
7262  ++mapRefCount;
7263  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7264  block->Map(hAllocator, mapRefCount, VMA_NULL);
7265  }
7266 
7267  m_BlockAllocation.m_Block = block;
7268  m_BlockAllocation.m_Offset = offset;
7269 }
7270 
7271 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7272 {
7273  VMA_ASSERT(newSize > 0);
7274  m_Size = newSize;
7275 }
7276 
7277 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7278 {
7279  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7280  m_BlockAllocation.m_Offset = newOffset;
7281 }
7282 
7283 VkDeviceSize VmaAllocation_T::GetOffset() const
7284 {
7285  switch(m_Type)
7286  {
7287  case ALLOCATION_TYPE_BLOCK:
7288  return m_BlockAllocation.m_Offset;
7289  case ALLOCATION_TYPE_DEDICATED:
7290  return 0;
7291  default:
7292  VMA_ASSERT(0);
7293  return 0;
7294  }
7295 }
7296 
7297 VkDeviceMemory VmaAllocation_T::GetMemory() const
7298 {
7299  switch(m_Type)
7300  {
7301  case ALLOCATION_TYPE_BLOCK:
7302  return m_BlockAllocation.m_Block->GetDeviceMemory();
7303  case ALLOCATION_TYPE_DEDICATED:
7304  return m_DedicatedAllocation.m_hMemory;
7305  default:
7306  VMA_ASSERT(0);
7307  return VK_NULL_HANDLE;
7308  }
7309 }
7310 
7311 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7312 {
7313  switch(m_Type)
7314  {
7315  case ALLOCATION_TYPE_BLOCK:
7316  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7317  case ALLOCATION_TYPE_DEDICATED:
7318  return m_DedicatedAllocation.m_MemoryTypeIndex;
7319  default:
7320  VMA_ASSERT(0);
7321  return UINT32_MAX;
7322  }
7323 }
7324 
7325 void* VmaAllocation_T::GetMappedData() const
7326 {
7327  switch(m_Type)
7328  {
7329  case ALLOCATION_TYPE_BLOCK:
7330  if(m_MapCount != 0)
7331  {
7332  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7333  VMA_ASSERT(pBlockData != VMA_NULL);
7334  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7335  }
7336  else
7337  {
7338  return VMA_NULL;
7339  }
7340  break;
7341  case ALLOCATION_TYPE_DEDICATED:
7342  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7343  return m_DedicatedAllocation.m_pMappedData;
7344  default:
7345  VMA_ASSERT(0);
7346  return VMA_NULL;
7347  }
7348 }
7349 
7350 bool VmaAllocation_T::CanBecomeLost() const
7351 {
7352  switch(m_Type)
7353  {
7354  case ALLOCATION_TYPE_BLOCK:
7355  return m_BlockAllocation.m_CanBecomeLost;
7356  case ALLOCATION_TYPE_DEDICATED:
7357  return false;
7358  default:
7359  VMA_ASSERT(0);
7360  return false;
7361  }
7362 }
7363 
7364 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7365 {
7366  VMA_ASSERT(CanBecomeLost());
7367 
7368  /*
7369  Warning: This is a carefully designed algorithm.
7370  Do not modify unless you really know what you're doing :)
7371  */
7372  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7373  for(;;)
7374  {
7375  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7376  {
7377  VMA_ASSERT(0);
7378  return false;
7379  }
7380  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7381  {
7382  return false;
7383  }
7384  else // Last use time earlier than current time.
7385  {
7386  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7387  {
7388  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7389  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7390  return true;
7391  }
7392  }
7393  }
7394 }
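
/*
Note: the loop above is a lock-free compare-exchange retry.
CompareExchangeLastUseFrameIndex() follows the usual compare_exchange contract
(assumed here): on failure it reloads the freshly observed value into
localLastUseFrameIndex, so the age check is re-evaluated against the latest
state and the transition to VMA_FRAME_INDEX_LOST happens exactly once even
under concurrent TouchAllocation() calls.
*/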
7395 
7396 #if VMA_STATS_STRING_ENABLED
7397 
7398 // Names correspond to the values of enum VmaSuballocationType.
7399 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7400  "FREE",
7401  "UNKNOWN",
7402  "BUFFER",
7403  "IMAGE_UNKNOWN",
7404  "IMAGE_LINEAR",
7405  "IMAGE_OPTIMAL",
7406 };
7407 
7408 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7409 {
7410  json.WriteString("Type");
7411  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7412 
7413  json.WriteString("Size");
7414  json.WriteNumber(m_Size);
7415 
7416  if(m_pUserData != VMA_NULL)
7417  {
7418  json.WriteString("UserData");
7419  if(IsUserDataString())
7420  {
7421  json.WriteString((const char*)m_pUserData);
7422  }
7423  else
7424  {
7425  json.BeginString();
7426  json.ContinueString_Pointer(m_pUserData);
7427  json.EndString();
7428  }
7429  }
7430 
7431  json.WriteString("CreationFrameIndex");
7432  json.WriteNumber(m_CreationFrameIndex);
7433 
7434  json.WriteString("LastUseFrameIndex");
7435  json.WriteNumber(GetLastUseFrameIndex());
7436 
7437  if(m_BufferImageUsage != 0)
7438  {
7439  json.WriteString("Usage");
7440  json.WriteNumber(m_BufferImageUsage);
7441  }
7442 }
7443 
7444 #endif
7445 
7446 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7447 {
7448  VMA_ASSERT(IsUserDataString());
7449  if(m_pUserData != VMA_NULL)
7450  {
7451  char* const oldStr = (char*)m_pUserData;
7452  const size_t oldStrLen = strlen(oldStr);
7453  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7454  m_pUserData = VMA_NULL;
7455  }
7456 }
7457 
7458 void VmaAllocation_T::BlockAllocMap()
7459 {
7460  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7461 
7462  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7463  {
7464  ++m_MapCount;
7465  }
7466  else
7467  {
7468  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7469  }
7470 }
7471 
7472 void VmaAllocation_T::BlockAllocUnmap()
7473 {
7474  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7475 
7476  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7477  {
7478  --m_MapCount;
7479  }
7480  else
7481  {
7482  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7483  }
7484 }
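
/*
Note: m_MapCount packs two pieces of state. The MAP_COUNT_FLAG_PERSISTENT_MAP
bit marks allocations created persistently mapped (they stay mapped for their
whole lifetime), while the remaining low bits count explicit map/unmap pairs -
hence the masking with ~MAP_COUNT_FLAG_PERSISTENT_MAP and the < 0x7F overflow
checks in the functions above.
*/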
7485 
7486 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7487 {
7488  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7489 
7490  if(m_MapCount != 0)
7491  {
7492  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7493  {
7494  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7495  *ppData = m_DedicatedAllocation.m_pMappedData;
7496  ++m_MapCount;
7497  return VK_SUCCESS;
7498  }
7499  else
7500  {
7501  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7502  return VK_ERROR_MEMORY_MAP_FAILED;
7503  }
7504  }
7505  else
7506  {
7507  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7508  hAllocator->m_hDevice,
7509  m_DedicatedAllocation.m_hMemory,
7510  0, // offset
7511  VK_WHOLE_SIZE,
7512  0, // flags
7513  ppData);
7514  if(result == VK_SUCCESS)
7515  {
7516  m_DedicatedAllocation.m_pMappedData = *ppData;
7517  m_MapCount = 1;
7518  }
7519  return result;
7520  }
7521 }
7522 
7523 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7524 {
7525  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7526 
7527  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7528  {
7529  --m_MapCount;
7530  if(m_MapCount == 0)
7531  {
7532  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7533  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7534  hAllocator->m_hDevice,
7535  m_DedicatedAllocation.m_hMemory);
7536  }
7537  }
7538  else
7539  {
7540  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7541  }
7542 }
7543 
7544 #if VMA_STATS_STRING_ENABLED
7545 
7546 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7547 {
7548  json.BeginObject();
7549 
7550  json.WriteString("Blocks");
7551  json.WriteNumber(stat.blockCount);
7552 
7553  json.WriteString("Allocations");
7554  json.WriteNumber(stat.allocationCount);
7555 
7556  json.WriteString("UnusedRanges");
7557  json.WriteNumber(stat.unusedRangeCount);
7558 
7559  json.WriteString("UsedBytes");
7560  json.WriteNumber(stat.usedBytes);
7561 
7562  json.WriteString("UnusedBytes");
7563  json.WriteNumber(stat.unusedBytes);
7564 
7565  if(stat.allocationCount > 1)
7566  {
7567  json.WriteString("AllocationSize");
7568  json.BeginObject(true);
7569  json.WriteString("Min");
7570  json.WriteNumber(stat.allocationSizeMin);
7571  json.WriteString("Avg");
7572  json.WriteNumber(stat.allocationSizeAvg);
7573  json.WriteString("Max");
7574  json.WriteNumber(stat.allocationSizeMax);
7575  json.EndObject();
7576  }
7577 
7578  if(stat.unusedRangeCount > 1)
7579  {
7580  json.WriteString("UnusedRangeSize");
7581  json.BeginObject(true);
7582  json.WriteString("Min");
7583  json.WriteNumber(stat.unusedRangeSizeMin);
7584  json.WriteString("Avg");
7585  json.WriteNumber(stat.unusedRangeSizeAvg);
7586  json.WriteString("Max");
7587  json.WriteNumber(stat.unusedRangeSizeMax);
7588  json.EndObject();
7589  }
7590 
7591  json.EndObject();
7592 }
7593 
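/*
Illustrative output (hypothetical values): for a block with 2 allocations and
1 unused range, VmaPrintStatInfo() emits an object shaped roughly like:

    {
      "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
      "UsedBytes": 1024, "UnusedBytes": 512,
      "AllocationSize": { "Min": 256, "Avg": 512, "Max": 768 }
    }

The "AllocationSize" / "UnusedRangeSize" sub-objects appear only when the
corresponding count is greater than 1.
*/
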
7594 #endif // #if VMA_STATS_STRING_ENABLED
7595 
7596 struct VmaSuballocationItemSizeLess
7597 {
7598  bool operator()(
7599  const VmaSuballocationList::iterator lhs,
7600  const VmaSuballocationList::iterator rhs) const
7601  {
7602  return lhs->size < rhs->size;
7603  }
7604  bool operator()(
7605  const VmaSuballocationList::iterator lhs,
7606  VkDeviceSize rhsSize) const
7607  {
7608  return lhs->size < rhsSize;
7609  }
7610 };
7611 
7612 
7613 ////////////////////////////////////////////////////////////////////////////////
7614 // class VmaBlockMetadata
7615 
7616 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7617  m_Size(0),
7618  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7619 {
7620 }
7621 
7622 #if VMA_STATS_STRING_ENABLED
7623 
7624 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7625  VkDeviceSize unusedBytes,
7626  size_t allocationCount,
7627  size_t unusedRangeCount) const
7628 {
7629  json.BeginObject();
7630 
7631  json.WriteString("TotalBytes");
7632  json.WriteNumber(GetSize());
7633 
7634  json.WriteString("UnusedBytes");
7635  json.WriteNumber(unusedBytes);
7636 
7637  json.WriteString("Allocations");
7638  json.WriteNumber((uint64_t)allocationCount);
7639 
7640  json.WriteString("UnusedRanges");
7641  json.WriteNumber((uint64_t)unusedRangeCount);
7642 
7643  json.WriteString("Suballocations");
7644  json.BeginArray();
7645 }
7646 
7647 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7648  VkDeviceSize offset,
7649  VmaAllocation hAllocation) const
7650 {
7651  json.BeginObject(true);
7652 
7653  json.WriteString("Offset");
7654  json.WriteNumber(offset);
7655 
7656  hAllocation->PrintParameters(json);
7657 
7658  json.EndObject();
7659 }
7660 
7661 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7662  VkDeviceSize offset,
7663  VkDeviceSize size) const
7664 {
7665  json.BeginObject(true);
7666 
7667  json.WriteString("Offset");
7668  json.WriteNumber(offset);
7669 
7670  json.WriteString("Type");
7671  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7672 
7673  json.WriteString("Size");
7674  json.WriteNumber(size);
7675 
7676  json.EndObject();
7677 }
7678 
7679 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7680 {
7681  json.EndArray();
7682  json.EndObject();
7683 }
7684 
7685 #endif // #if VMA_STATS_STRING_ENABLED
7686 
7687 ////////////////////////////////////////////////////////////////////////////////
7688 // class VmaBlockMetadata_Generic
7689 
7690 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7691  VmaBlockMetadata(hAllocator),
7692  m_FreeCount(0),
7693  m_SumFreeSize(0),
7694  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7695  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7696 {
7697 }
7698 
7699 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7700 {
7701 }
7702 
7703 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7704 {
7705  VmaBlockMetadata::Init(size);
7706 
7707  m_FreeCount = 1;
7708  m_SumFreeSize = size;
7709 
7710  VmaSuballocation suballoc = {};
7711  suballoc.offset = 0;
7712  suballoc.size = size;
7713  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7714  suballoc.hAllocation = VK_NULL_HANDLE;
7715 
7716  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7717  m_Suballocations.push_back(suballoc);
7718  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7719  --suballocItem;
7720  m_FreeSuballocationsBySize.push_back(suballocItem);
7721 }
7722 
7723 bool VmaBlockMetadata_Generic::Validate() const
7724 {
7725  VMA_VALIDATE(!m_Suballocations.empty());
7726 
7727  // Expected offset of new suballocation as calculated from previous ones.
7728  VkDeviceSize calculatedOffset = 0;
7729  // Expected number of free suballocations as calculated from traversing their list.
7730  uint32_t calculatedFreeCount = 0;
7731  // Expected sum size of free suballocations as calculated from traversing their list.
7732  VkDeviceSize calculatedSumFreeSize = 0;
7733  // Expected number of free suballocations that should be registered in
7734  // m_FreeSuballocationsBySize calculated from traversing their list.
7735  size_t freeSuballocationsToRegister = 0;
7736  // True if previous visited suballocation was free.
7737  bool prevFree = false;
7738 
7739  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7740  suballocItem != m_Suballocations.cend();
7741  ++suballocItem)
7742  {
7743  const VmaSuballocation& subAlloc = *suballocItem;
7744 
7745  // Actual offset of this suballocation doesn't match expected one.
7746  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7747 
7748  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7749  // Two adjacent free suballocations are invalid. They should be merged.
7750  VMA_VALIDATE(!prevFree || !currFree);
7751 
7752  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7753 
7754  if(currFree)
7755  {
7756  calculatedSumFreeSize += subAlloc.size;
7757  ++calculatedFreeCount;
7758  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7759  {
7760  ++freeSuballocationsToRegister;
7761  }
7762 
7763  // Margin required between allocations - every free range must be at least that large.
7764  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7765  }
7766  else
7767  {
7768  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7769  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7770 
7771  // Margin required between allocations - previous allocation must be free.
7772  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7773  }
7774 
7775  calculatedOffset += subAlloc.size;
7776  prevFree = currFree;
7777  }
7778 
7779  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7780  // match expected one.
7781  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7782 
7783  VkDeviceSize lastSize = 0;
7784  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7785  {
7786  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7787 
7788  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7789  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7790  // They must be sorted by size ascending.
7791  VMA_VALIDATE(suballocItem->size >= lastSize);
7792 
7793  lastSize = suballocItem->size;
7794  }
7795 
7796  // Check if totals match calculated values.
7797  VMA_VALIDATE(ValidateFreeSuballocationList());
7798  VMA_VALIDATE(calculatedOffset == GetSize());
7799  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7800  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7801 
7802  return true;
7803 }
7804 
7805 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7806 {
7807  if(!m_FreeSuballocationsBySize.empty())
7808  {
7809  return m_FreeSuballocationsBySize.back()->size;
7810  }
7811  else
7812  {
7813  return 0;
7814  }
7815 }
7816 
7817 bool VmaBlockMetadata_Generic::IsEmpty() const
7818 {
7819  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7820 }
7821 
7822 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7823 {
7824  outInfo.blockCount = 1;
7825 
7826  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7827  outInfo.allocationCount = rangeCount - m_FreeCount;
7828  outInfo.unusedRangeCount = m_FreeCount;
7829 
7830  outInfo.unusedBytes = m_SumFreeSize;
7831  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7832 
7833  outInfo.allocationSizeMin = UINT64_MAX;
7834  outInfo.allocationSizeMax = 0;
7835  outInfo.unusedRangeSizeMin = UINT64_MAX;
7836  outInfo.unusedRangeSizeMax = 0;
7837 
7838  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7839  suballocItem != m_Suballocations.cend();
7840  ++suballocItem)
7841  {
7842  const VmaSuballocation& suballoc = *suballocItem;
7843  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7844  {
7845  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7846  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7847  }
7848  else
7849  {
7850  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7851  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7852  }
7853  }
7854 }
7855 
7856 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7857 {
7858  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7859 
7860  inoutStats.size += GetSize();
7861  inoutStats.unusedSize += m_SumFreeSize;
7862  inoutStats.allocationCount += rangeCount - m_FreeCount;
7863  inoutStats.unusedRangeCount += m_FreeCount;
7864  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7865 }
7866 
7867 #if VMA_STATS_STRING_ENABLED
7868 
7869 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7870 {
7871  PrintDetailedMap_Begin(json,
7872  m_SumFreeSize, // unusedBytes
7873  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7874  m_FreeCount); // unusedRangeCount
7875 
7876  size_t i = 0;
7877  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7878  suballocItem != m_Suballocations.cend();
7879  ++suballocItem, ++i)
7880  {
7881  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7882  {
7883  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7884  }
7885  else
7886  {
7887  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7888  }
7889  }
7890 
7891  PrintDetailedMap_End(json);
7892 }
7893 
7894 #endif // #if VMA_STATS_STRING_ENABLED
7895 
7896 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7897  uint32_t currentFrameIndex,
7898  uint32_t frameInUseCount,
7899  VkDeviceSize bufferImageGranularity,
7900  VkDeviceSize allocSize,
7901  VkDeviceSize allocAlignment,
7902  bool upperAddress,
7903  VmaSuballocationType allocType,
7904  bool canMakeOtherLost,
7905  uint32_t strategy,
7906  VmaAllocationRequest* pAllocationRequest)
7907 {
7908  VMA_ASSERT(allocSize > 0);
7909  VMA_ASSERT(!upperAddress);
7910  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7911  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7912  VMA_HEAVY_ASSERT(Validate());
7913 
7914  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7915 
7916  // There is not enough total free space in this block to fulfill the request: Early return.
7917  if(canMakeOtherLost == false &&
7918  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7919  {
7920  return false;
7921  }
7922 
7923  // Efficient search in m_FreeSuballocationsBySize, which is sorted by size ascending.
7924  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7925  if(freeSuballocCount > 0)
7926  {
7927  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7928  {
7929  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7930  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7931  m_FreeSuballocationsBySize.data(),
7932  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7933  allocSize + 2 * VMA_DEBUG_MARGIN,
7934  VmaSuballocationItemSizeLess());
7935  size_t index = it - m_FreeSuballocationsBySize.data();
7936  for(; index < freeSuballocCount; ++index)
7937  {
7938  if(CheckAllocation(
7939  currentFrameIndex,
7940  frameInUseCount,
7941  bufferImageGranularity,
7942  allocSize,
7943  allocAlignment,
7944  allocType,
7945  m_FreeSuballocationsBySize[index],
7946  false, // canMakeOtherLost
7947  &pAllocationRequest->offset,
7948  &pAllocationRequest->itemsToMakeLostCount,
7949  &pAllocationRequest->sumFreeSize,
7950  &pAllocationRequest->sumItemSize))
7951  {
7952  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7953  return true;
7954  }
7955  }
7956  }
7957  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7958  {
7959  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7960  it != m_Suballocations.end();
7961  ++it)
7962  {
7963  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7964  currentFrameIndex,
7965  frameInUseCount,
7966  bufferImageGranularity,
7967  allocSize,
7968  allocAlignment,
7969  allocType,
7970  it,
7971  false, // canMakeOtherLost
7972  &pAllocationRequest->offset,
7973  &pAllocationRequest->itemsToMakeLostCount,
7974  &pAllocationRequest->sumFreeSize,
7975  &pAllocationRequest->sumItemSize))
7976  {
7977  pAllocationRequest->item = it;
7978  return true;
7979  }
7980  }
7981  }
7982  else // WORST_FIT, FIRST_FIT
7983  {
7984  // Search starting from the biggest suballocations.
7985  for(size_t index = freeSuballocCount; index--; )
7986  {
7987  if(CheckAllocation(
7988  currentFrameIndex,
7989  frameInUseCount,
7990  bufferImageGranularity,
7991  allocSize,
7992  allocAlignment,
7993  allocType,
7994  m_FreeSuballocationsBySize[index],
7995  false, // canMakeOtherLost
7996  &pAllocationRequest->offset,
7997  &pAllocationRequest->itemsToMakeLostCount,
7998  &pAllocationRequest->sumFreeSize,
7999  &pAllocationRequest->sumItemSize))
8000  {
8001  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8002  return true;
8003  }
8004  }
8005  }
8006  }
8007 
8008  if(canMakeOtherLost)
8009  {
8010  // Brute-force algorithm. TODO: Come up with something better.
8011 
8012  bool found = false;
8013  VmaAllocationRequest tmpAllocRequest = {};
8014  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8015  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8016  suballocIt != m_Suballocations.end();
8017  ++suballocIt)
8018  {
8019  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8020  suballocIt->hAllocation->CanBecomeLost())
8021  {
8022  if(CheckAllocation(
8023  currentFrameIndex,
8024  frameInUseCount,
8025  bufferImageGranularity,
8026  allocSize,
8027  allocAlignment,
8028  allocType,
8029  suballocIt,
8030  canMakeOtherLost,
8031  &tmpAllocRequest.offset,
8032  &tmpAllocRequest.itemsToMakeLostCount,
8033  &tmpAllocRequest.sumFreeSize,
8034  &tmpAllocRequest.sumItemSize))
8035  {
8036  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8037  {
8038  *pAllocationRequest = tmpAllocRequest;
8039  pAllocationRequest->item = suballocIt;
8040  break;
8041  }
8042  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8043  {
8044  *pAllocationRequest = tmpAllocRequest;
8045  pAllocationRequest->item = suballocIt;
8046  found = true;
8047  }
8048  }
8049  }
8050  }
8051 
8052  return found;
8053  }
8054 
8055  return false;
8056 }
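
/*
Summary of the three search paths above:
- VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT: binary-search
  m_FreeSuballocationsBySize (sorted by size ascending) for the smallest free
  range that can fit, then scan forward until CheckAllocation() succeeds.
- VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET: linear scan of the whole
  suballocation list, returning the lowest viable offset.
- Otherwise (worst fit / first fit): iterate m_FreeSuballocationsBySize from
  the largest free range downward.
*/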
8057 
8058 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8059  uint32_t currentFrameIndex,
8060  uint32_t frameInUseCount,
8061  VmaAllocationRequest* pAllocationRequest)
8062 {
8063  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8064 
8065  while(pAllocationRequest->itemsToMakeLostCount > 0)
8066  {
8067  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8068  {
8069  ++pAllocationRequest->item;
8070  }
8071  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8072  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8073  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8074  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8075  {
8076  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8077  --pAllocationRequest->itemsToMakeLostCount;
8078  }
8079  else
8080  {
8081  return false;
8082  }
8083  }
8084 
8085  VMA_HEAVY_ASSERT(Validate());
8086  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8087  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8088 
8089  return true;
8090 }
8091 
8092 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8093 {
8094  uint32_t lostAllocationCount = 0;
8095  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8096  it != m_Suballocations.end();
8097  ++it)
8098  {
8099  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8100  it->hAllocation->CanBecomeLost() &&
8101  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8102  {
8103  it = FreeSuballocation(it);
8104  ++lostAllocationCount;
8105  }
8106  }
8107  return lostAllocationCount;
8108 }
8109 
8110 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8111 {
8112  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8113  it != m_Suballocations.end();
8114  ++it)
8115  {
8116  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8117  {
8118  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8119  {
8120  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8121  return VK_ERROR_VALIDATION_FAILED_EXT;
8122  }
8123  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8124  {
8125  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8126  return VK_ERROR_VALIDATION_FAILED_EXT;
8127  }
8128  }
8129  }
8130 
8131  return VK_SUCCESS;
8132 }
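
/*
Sketch of the layout CheckCorruption() relies on when VMA_DEBUG_MARGIN > 0
(the margins are presumably filled with a known magic pattern at allocation
time and rechecked here by VmaValidateMagicValue):

    | ... | margin | allocation | margin | ... |
            ^ checked at offset - VMA_DEBUG_MARGIN
                                  ^ checked at offset + size

A failed check means something wrote outside the bounds of an allocation.
*/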
8133 
8134 void VmaBlockMetadata_Generic::Alloc(
8135  const VmaAllocationRequest& request,
8136  VmaSuballocationType type,
8137  VkDeviceSize allocSize,
8138  VmaAllocation hAllocation)
8139 {
8140  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8141  VMA_ASSERT(request.item != m_Suballocations.end());
8142  VmaSuballocation& suballoc = *request.item;
8143  // Given suballocation is a free block.
8144  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8145  // Given offset is inside this suballocation.
8146  VMA_ASSERT(request.offset >= suballoc.offset);
8147  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8148  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8149  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8150 
8151  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8152  // it to become used.
8153  UnregisterFreeSuballocation(request.item);
8154 
8155  suballoc.offset = request.offset;
8156  suballoc.size = allocSize;
8157  suballoc.type = type;
8158  suballoc.hAllocation = hAllocation;
8159 
8160  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8161  if(paddingEnd)
8162  {
8163  VmaSuballocation paddingSuballoc = {};
8164  paddingSuballoc.offset = request.offset + allocSize;
8165  paddingSuballoc.size = paddingEnd;
8166  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8167  VmaSuballocationList::iterator next = request.item;
8168  ++next;
8169  const VmaSuballocationList::iterator paddingEndItem =
8170  m_Suballocations.insert(next, paddingSuballoc);
8171  RegisterFreeSuballocation(paddingEndItem);
8172  }
8173 
8174  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8175  if(paddingBegin)
8176  {
8177  VmaSuballocation paddingSuballoc = {};
8178  paddingSuballoc.offset = request.offset - paddingBegin;
8179  paddingSuballoc.size = paddingBegin;
8180  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8181  const VmaSuballocationList::iterator paddingBeginItem =
8182  m_Suballocations.insert(request.item, paddingSuballoc);
8183  RegisterFreeSuballocation(paddingBeginItem);
8184  }
8185 
8186  // Update totals.
8187  m_FreeCount = m_FreeCount - 1;
8188  if(paddingBegin > 0)
8189  {
8190  ++m_FreeCount;
8191  }
8192  if(paddingEnd > 0)
8193  {
8194  ++m_FreeCount;
8195  }
8196  m_SumFreeSize -= allocSize;
8197 }
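
/*
Sketch of the split performed above when the request does not consume the
whole free suballocation:

    before: |<----------------- free suballocation ----------------->|
    after:  | paddingBegin (free) | allocation | paddingEnd (free)   |

m_FreeCount is decremented for the consumed free range and re-incremented for
each non-empty padding, and m_SumFreeSize drops by exactly allocSize.
*/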
8198 
8199 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8200 {
8201  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8202  suballocItem != m_Suballocations.end();
8203  ++suballocItem)
8204  {
8205  VmaSuballocation& suballoc = *suballocItem;
8206  if(suballoc.hAllocation == allocation)
8207  {
8208  FreeSuballocation(suballocItem);
8209  VMA_HEAVY_ASSERT(Validate());
8210  return;
8211  }
8212  }
8213  VMA_ASSERT(0 && "Not found!");
8214 }
8215 
8216 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8217 {
8218  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8219  suballocItem != m_Suballocations.end();
8220  ++suballocItem)
8221  {
8222  VmaSuballocation& suballoc = *suballocItem;
8223  if(suballoc.offset == offset)
8224  {
8225  FreeSuballocation(suballocItem);
8226  return;
8227  }
8228  }
8229  VMA_ASSERT(0 && "Not found!");
8230 }
8231 
8232 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8233 {
8234  typedef VmaSuballocationList::iterator iter_type;
8235  for(iter_type suballocItem = m_Suballocations.begin();
8236  suballocItem != m_Suballocations.end();
8237  ++suballocItem)
8238  {
8239  VmaSuballocation& suballoc = *suballocItem;
8240  if(suballoc.hAllocation == alloc)
8241  {
8242  iter_type nextItem = suballocItem;
8243  ++nextItem;
8244 
8245  // Should have been ensured on higher level.
8246  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8247 
8248  // Shrinking.
8249  if(newSize < alloc->GetSize())
8250  {
8251  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8252 
8253  // There is next item.
8254  if(nextItem != m_Suballocations.end())
8255  {
8256  // Next item is free.
8257  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8258  {
8259  // Grow this next item backward.
8260  UnregisterFreeSuballocation(nextItem);
8261  nextItem->offset -= sizeDiff;
8262  nextItem->size += sizeDiff;
8263  RegisterFreeSuballocation(nextItem);
8264  }
8265  // Next item is not free.
8266  else
8267  {
8268  // Create free item after current one.
8269  VmaSuballocation newFreeSuballoc;
8270  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8271  newFreeSuballoc.offset = suballoc.offset + newSize;
8272  newFreeSuballoc.size = sizeDiff;
8273  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8274  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8275  RegisterFreeSuballocation(newFreeSuballocIt);
8276 
8277  ++m_FreeCount;
8278  }
8279  }
8280  // This is the last item.
8281  else
8282  {
8283  // Create free item at the end.
8284  VmaSuballocation newFreeSuballoc;
8285  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8286  newFreeSuballoc.offset = suballoc.offset + newSize;
8287  newFreeSuballoc.size = sizeDiff;
8288  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8289  m_Suballocations.push_back(newFreeSuballoc);
8290 
8291  iter_type newFreeSuballocIt = m_Suballocations.end();
8292  RegisterFreeSuballocation(--newFreeSuballocIt);
8293 
8294  ++m_FreeCount;
8295  }
8296 
8297  suballoc.size = newSize;
8298  m_SumFreeSize += sizeDiff;
8299  }
8300  // Growing.
8301  else
8302  {
8303  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8304 
8305  // There is next item.
8306  if(nextItem != m_Suballocations.end())
8307  {
8308  // Next item is free.
8309  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8310  {
8311  // There is not enough free space, including margin.
8312  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8313  {
8314  return false;
8315  }
8316 
8317  // There is more free space than required.
8318  if(nextItem->size > sizeDiff)
8319  {
8320  // Move and shrink this next item.
8321  UnregisterFreeSuballocation(nextItem);
8322  nextItem->offset += sizeDiff;
8323  nextItem->size -= sizeDiff;
8324  RegisterFreeSuballocation(nextItem);
8325  }
8326  // There is exactly the amount of free space required.
8327  else
8328  {
8329  // Remove this next free item.
8330  UnregisterFreeSuballocation(nextItem);
8331  m_Suballocations.erase(nextItem);
8332  --m_FreeCount;
8333  }
8334  }
8335  // Next item is not free - there is no space to grow.
8336  else
8337  {
8338  return false;
8339  }
8340  }
8341  // This is the last item - there is no space to grow.
8342  else
8343  {
8344  return false;
8345  }
8346 
8347  suballoc.size = newSize;
8348  m_SumFreeSize -= sizeDiff;
8349  }
8350 
8351  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8352  return true;
8353  }
8354  }
8355  VMA_ASSERT(0 && "Not found!");
8356  return false;
8357 }
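
// [Illustrative sketch, not part of the library] The "Shrinking" branch above
// either grows the following free item backward or inserts a new free item
// covering the released tail. A minimal standalone model of that logic, with a
// plain std::list of {offset, size, free} records standing in for
// VmaSuballocationList and its registration bookkeeping (all names below are
// hypothetical):
#if 0 // Example only; excluded from compilation.
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>

struct Sub { uint64_t offset, size; bool free; };

void Shrink(std::list<Sub>& subs, std::list<Sub>::iterator it, uint64_t newSize)
{
    assert(!it->free && newSize < it->size);
    const uint64_t diff = it->size - newSize;
    std::list<Sub>::iterator next = std::next(it);
    if(next != subs.end() && next->free)
    {
        next->offset -= diff; // Grow the following free item backward.
        next->size   += diff;
    }
    else
    {
        // Insert a new free item covering the released tail.
        subs.insert(next, Sub{ it->offset + newSize, diff, true });
    }
    it->size = newSize;
}

int main()
{
    std::list<Sub> subs{ { 0, 64, false }, { 64, 64, true } };
    Shrink(subs, subs.begin(), 48);
    assert(subs.begin()->size == 48);
    assert(std::next(subs.begin())->offset == 48 && std::next(subs.begin())->size == 80);
    return 0;
}
#endif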
8358 
8359 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8360 {
8361  VkDeviceSize lastSize = 0;
8362  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8363  {
8364  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8365 
8366  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8367  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8368  VMA_VALIDATE(it->size >= lastSize);
8369  lastSize = it->size;
8370  }
8371  return true;
8372 }
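
// [Illustrative sketch] The invariant verified above, restated on a bare
// std::vector: the by-size list is sorted ascending and only holds free items
// of at least the registration threshold (the constant below is a stand-in for
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, not its real value):
#if 0 // Example only; excluded from compilation.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    const uint64_t kMinRegisterSize = 16; // hypothetical threshold
    std::vector<uint64_t> bySize{ 16, 32, 32, 128 };
    assert(std::is_sorted(bySize.begin(), bySize.end()));
    for(uint64_t s : bySize)
    {
        assert(s >= kMinRegisterSize);
    }
    return 0;
}
#endif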
8373 
8374 bool VmaBlockMetadata_Generic::CheckAllocation(
8375  uint32_t currentFrameIndex,
8376  uint32_t frameInUseCount,
8377  VkDeviceSize bufferImageGranularity,
8378  VkDeviceSize allocSize,
8379  VkDeviceSize allocAlignment,
8380  VmaSuballocationType allocType,
8381  VmaSuballocationList::const_iterator suballocItem,
8382  bool canMakeOtherLost,
8383  VkDeviceSize* pOffset,
8384  size_t* itemsToMakeLostCount,
8385  VkDeviceSize* pSumFreeSize,
8386  VkDeviceSize* pSumItemSize) const
8387 {
8388  VMA_ASSERT(allocSize > 0);
8389  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8390  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8391  VMA_ASSERT(pOffset != VMA_NULL);
8392 
8393  *itemsToMakeLostCount = 0;
8394  *pSumFreeSize = 0;
8395  *pSumItemSize = 0;
8396 
8397  if(canMakeOtherLost)
8398  {
8399  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8400  {
8401  *pSumFreeSize = suballocItem->size;
8402  }
8403  else
8404  {
8405  if(suballocItem->hAllocation->CanBecomeLost() &&
8406  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8407  {
8408  ++*itemsToMakeLostCount;
8409  *pSumItemSize = suballocItem->size;
8410  }
8411  else
8412  {
8413  return false;
8414  }
8415  }
8416 
8417  // Remaining size is too small for this request: Early return.
8418  if(GetSize() - suballocItem->offset < allocSize)
8419  {
8420  return false;
8421  }
8422 
8423  // Start from offset equal to beginning of this suballocation.
8424  *pOffset = suballocItem->offset;
8425 
8426  // Apply VMA_DEBUG_MARGIN at the beginning.
8427  if(VMA_DEBUG_MARGIN > 0)
8428  {
8429  *pOffset += VMA_DEBUG_MARGIN;
8430  }
8431 
8432  // Apply alignment.
8433  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8434 
8435  // Check previous suballocations for BufferImageGranularity conflicts.
8436  // Make bigger alignment if necessary.
8437  if(bufferImageGranularity > 1)
8438  {
8439  bool bufferImageGranularityConflict = false;
8440  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8441  while(prevSuballocItem != m_Suballocations.cbegin())
8442  {
8443  --prevSuballocItem;
8444  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8445  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8446  {
8447  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8448  {
8449  bufferImageGranularityConflict = true;
8450  break;
8451  }
8452  }
8453  else
8454  // Already on previous page.
8455  break;
8456  }
8457  if(bufferImageGranularityConflict)
8458  {
8459  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8460  }
8461  }
8462 
8463  // Now that we have final *pOffset, check if we are past suballocItem.
8464  // If yes, return false - this function should be called for another suballocItem as starting point.
8465  if(*pOffset >= suballocItem->offset + suballocItem->size)
8466  {
8467  return false;
8468  }
8469 
8470  // Calculate padding at the beginning based on current offset.
8471  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8472 
8473  // Calculate required margin at the end.
8474  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8475 
8476  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8477  // Another early return check.
8478  if(suballocItem->offset + totalSize > GetSize())
8479  {
8480  return false;
8481  }
8482 
8483  // Advance lastSuballocItem until desired size is reached.
8484  // Update itemsToMakeLostCount.
8485  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8486  if(totalSize > suballocItem->size)
8487  {
8488  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8489  while(remainingSize > 0)
8490  {
8491  ++lastSuballocItem;
8492  if(lastSuballocItem == m_Suballocations.cend())
8493  {
8494  return false;
8495  }
8496  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8497  {
8498  *pSumFreeSize += lastSuballocItem->size;
8499  }
8500  else
8501  {
8502  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8503  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8504  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8505  {
8506  ++*itemsToMakeLostCount;
8507  *pSumItemSize += lastSuballocItem->size;
8508  }
8509  else
8510  {
8511  return false;
8512  }
8513  }
8514  remainingSize = (lastSuballocItem->size < remainingSize) ?
8515  remainingSize - lastSuballocItem->size : 0;
8516  }
8517  }
8518 
8519  // Check next suballocations for BufferImageGranularity conflicts.
8520  // If conflict exists, we must mark more allocations lost or fail.
8521  if(bufferImageGranularity > 1)
8522  {
8523  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8524  ++nextSuballocItem;
8525  while(nextSuballocItem != m_Suballocations.cend())
8526  {
8527  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8528  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8529  {
8530  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8531  {
8532  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8533  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8534  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8535  {
8536  ++*itemsToMakeLostCount;
8537  }
8538  else
8539  {
8540  return false;
8541  }
8542  }
8543  }
8544  else
8545  {
8546  // Already on next page.
8547  break;
8548  }
8549  ++nextSuballocItem;
8550  }
8551  }
8552  }
8553  else
8554  {
8555  const VmaSuballocation& suballoc = *suballocItem;
8556  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8557 
8558  *pSumFreeSize = suballoc.size;
8559 
8560  // Size of this suballocation is too small for this request: Early return.
8561  if(suballoc.size < allocSize)
8562  {
8563  return false;
8564  }
8565 
8566  // Start from offset equal to beginning of this suballocation.
8567  *pOffset = suballoc.offset;
8568 
8569  // Apply VMA_DEBUG_MARGIN at the beginning.
8570  if(VMA_DEBUG_MARGIN > 0)
8571  {
8572  *pOffset += VMA_DEBUG_MARGIN;
8573  }
8574 
8575  // Apply alignment.
8576  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8577 
8578  // Check previous suballocations for BufferImageGranularity conflicts.
8579  // Make bigger alignment if necessary.
8580  if(bufferImageGranularity > 1)
8581  {
8582  bool bufferImageGranularityConflict = false;
8583  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8584  while(prevSuballocItem != m_Suballocations.cbegin())
8585  {
8586  --prevSuballocItem;
8587  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8588  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8589  {
8590  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8591  {
8592  bufferImageGranularityConflict = true;
8593  break;
8594  }
8595  }
8596  else
8597  // Already on previous page.
8598  break;
8599  }
8600  if(bufferImageGranularityConflict)
8601  {
8602  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8603  }
8604  }
8605 
8606  // Calculate padding at the beginning based on current offset.
8607  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8608 
8609  // Calculate required margin at the end.
8610  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8611 
8612  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8613  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8614  {
8615  return false;
8616  }
8617 
8618  // Check next suballocations for BufferImageGranularity conflicts.
8619  // If conflict exists, allocation cannot be made here.
8620  if(bufferImageGranularity > 1)
8621  {
8622  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8623  ++nextSuballocItem;
8624  while(nextSuballocItem != m_Suballocations.cend())
8625  {
8626  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8627  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8628  {
8629  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8630  {
8631  return false;
8632  }
8633  }
8634  else
8635  {
8636  // Already on next page.
8637  break;
8638  }
8639  ++nextSuballocItem;
8640  }
8641  }
8642  }
8643 
8644  // All tests passed: Success. pOffset is already filled.
8645  return true;
8646 }
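
// [Illustrative sketch] The offset computation performed by CheckAllocation,
// reduced to its arithmetic: start at the suballocation's offset, reserve the
// debug margin, align up to the requested alignment, and fall back to the
// coarser bufferImageGranularity when a neighbour of a conflicting type shares
// the same "page". All values below are made up for the example:
#if 0 // Example only; excluded from compilation.
#include <cassert>
#include <cstdint>

static uint64_t AlignUpEx(uint64_t v, uint64_t a) { return (v + a - 1) / a * a; }

int main()
{
    const uint64_t suballocOffset = 100, debugMargin = 16;
    const uint64_t allocAlignment = 64, bufferImageGranularity = 1024;

    uint64_t offset = AlignUpEx(suballocOffset + debugMargin, allocAlignment);
    assert(offset == 128);

    // A granularity conflict with the previous suballocation raises the
    // alignment to bufferImageGranularity:
    offset = AlignUpEx(offset, bufferImageGranularity);
    assert(offset == 1024);
    return 0;
}
#endif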
8647 
8648 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8649 {
8650  VMA_ASSERT(item != m_Suballocations.end());
8651  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8652 
8653  VmaSuballocationList::iterator nextItem = item;
8654  ++nextItem;
8655  VMA_ASSERT(nextItem != m_Suballocations.end());
8656  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8657 
8658  item->size += nextItem->size;
8659  --m_FreeCount;
8660  m_Suballocations.erase(nextItem);
8661 }
8662 
8663 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8664 {
8665  // Change this suballocation to be marked as free.
8666  VmaSuballocation& suballoc = *suballocItem;
8667  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8668  suballoc.hAllocation = VK_NULL_HANDLE;
8669 
8670  // Update totals.
8671  ++m_FreeCount;
8672  m_SumFreeSize += suballoc.size;
8673 
8674  // Merge with previous and/or next suballocation if it's also free.
8675  bool mergeWithNext = false;
8676  bool mergeWithPrev = false;
8677 
8678  VmaSuballocationList::iterator nextItem = suballocItem;
8679  ++nextItem;
8680  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8681  {
8682  mergeWithNext = true;
8683  }
8684 
8685  VmaSuballocationList::iterator prevItem = suballocItem;
8686  if(suballocItem != m_Suballocations.begin())
8687  {
8688  --prevItem;
8689  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8690  {
8691  mergeWithPrev = true;
8692  }
8693  }
8694 
8695  if(mergeWithNext)
8696  {
8697  UnregisterFreeSuballocation(nextItem);
8698  MergeFreeWithNext(suballocItem);
8699  }
8700 
8701  if(mergeWithPrev)
8702  {
8703  UnregisterFreeSuballocation(prevItem);
8704  MergeFreeWithNext(prevItem);
8705  RegisterFreeSuballocation(prevItem);
8706  return prevItem;
8707  }
8708  else
8709  {
8710  RegisterFreeSuballocation(suballocItem);
8711  return suballocItem;
8712  }
8713 }
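
// [Illustrative sketch] FreeSuballocation above marks the item free and then
// coalesces with free neighbours; here is the same merge order (next first,
// then previous) on a plain std::list, without the by-size registration:
#if 0 // Example only; excluded from compilation.
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>

struct Sub { uint64_t offset, size; bool free; };

std::list<Sub>::iterator FreeAndCoalesce(std::list<Sub>& subs, std::list<Sub>::iterator it)
{
    it->free = true;
    std::list<Sub>::iterator next = std::next(it);
    if(next != subs.end() && next->free)
    {
        it->size += next->size; // Merge with the next free item.
        subs.erase(next);
    }
    if(it != subs.begin())
    {
        std::list<Sub>::iterator prev = std::prev(it);
        if(prev->free)
        {
            prev->size += it->size; // Merge into the previous free item.
            subs.erase(it);
            return prev;
        }
    }
    return it;
}

int main()
{
    std::list<Sub> subs{ { 0, 32, true }, { 32, 32, false }, { 64, 32, true } };
    std::list<Sub>::iterator merged = FreeAndCoalesce(subs, std::next(subs.begin()));
    assert(subs.size() == 1 && merged->offset == 0 && merged->size == 96);
    return 0;
}
#endif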
8714 
8715 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8716 {
8717  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8718  VMA_ASSERT(item->size > 0);
8719 
8720  // You may want to enable this validation at the beginning or at the end of
8721  // this function, depending on what you want to check.
8722  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8723 
8724  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8725  {
8726  if(m_FreeSuballocationsBySize.empty())
8727  {
8728  m_FreeSuballocationsBySize.push_back(item);
8729  }
8730  else
8731  {
8732  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8733  }
8734  }
8735 
8736  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8737 }
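
// [Illustrative sketch] VmaVectorInsertSorted above keeps the by-size vector
// ordered; the equivalent with the standard library (sizes only, hypothetical
// values) is a binary-searched insert:
#if 0 // Example only; excluded from compilation.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    std::vector<uint64_t> bySize{ 16, 32, 64 };
    const uint64_t newSize = 48;
    bySize.insert(std::lower_bound(bySize.begin(), bySize.end(), newSize), newSize);
    assert(std::is_sorted(bySize.begin(), bySize.end()));
    assert(bySize[2] == 48);
    return 0;
}
#endif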
8738 
8739 
8740 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8741 {
8742  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8743  VMA_ASSERT(item->size > 0);
8744 
8745  // You may want to enable this validation at the beginning or at the end of
8746  // this function, depending on what you want to check.
8747  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8748 
8749  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8750  {
8751  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8752  m_FreeSuballocationsBySize.data(),
8753  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8754  item,
8755  VmaSuballocationItemSizeLess());
8756  for(size_t index = it - m_FreeSuballocationsBySize.data();
8757  index < m_FreeSuballocationsBySize.size();
8758  ++index)
8759  {
8760  if(m_FreeSuballocationsBySize[index] == item)
8761  {
8762  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8763  return;
8764  }
8765  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8766  }
8767  VMA_ASSERT(0 && "Not found.");
8768  }
8769 
8770  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8771 }
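
// [Illustrative sketch] Because several free items can have equal size, the
// binary search above (VmaBinaryFindFirstNotLess) only narrows the range; the
// exact element is then found by a short forward scan over the equal-size run.
// The same two-step lookup with std::lower_bound on hypothetical records:
#if 0 // Example only; excluded from compilation.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Item { uint64_t size; int id; };

int main()
{
    std::vector<Item> bySize{ { 16, 0 }, { 32, 1 }, { 32, 2 }, { 64, 3 } };
    const Item target{ 32, 2 };

    std::vector<Item>::iterator it = std::lower_bound(bySize.begin(), bySize.end(), target,
        [](const Item& a, const Item& b) { return a.size < b.size; });
    while(it != bySize.end() && it->size == target.size && it->id != target.id)
    {
        ++it; // Scan the run of equal sizes for the exact element.
    }
    assert(it != bySize.end() && it->id == 2);
    bySize.erase(it);
    assert(bySize.size() == 3);
    return 0;
}
#endif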
8772 
8773 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8774  VkDeviceSize bufferImageGranularity,
8775  VmaSuballocationType& inOutPrevSuballocType) const
8776 {
8777  if(bufferImageGranularity == 1 || IsEmpty())
8778  {
8779  return false;
8780  }
8781 
8782  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8783  bool typeConflictFound = false;
8784  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8785  it != m_Suballocations.cend();
8786  ++it)
8787  {
8788  const VmaSuballocationType suballocType = it->type;
8789  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8790  {
8791  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8792  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8793  {
8794  typeConflictFound = true;
8795  }
8796  inOutPrevSuballocType = suballocType;
8797  }
8798  }
8799 
8800  return typeConflictFound || minAlignment >= bufferImageGranularity;
8801 }
8802 
8803 ////////////////////////////////////////////////////////////////////////////////
8804 // class VmaBlockMetadata_Linear
8805 
8806 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8807  VmaBlockMetadata(hAllocator),
8808  m_SumFreeSize(0),
8809  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8810  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8811  m_1stVectorIndex(0),
8812  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8813  m_1stNullItemsBeginCount(0),
8814  m_1stNullItemsMiddleCount(0),
8815  m_2ndNullItemsCount(0)
8816 {
8817 }
8818 
8819 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8820 {
8821 }
8822 
8823 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8824 {
8825  VmaBlockMetadata::Init(size);
8826  m_SumFreeSize = size;
8827 }
8828 
8829 bool VmaBlockMetadata_Linear::Validate() const
8830 {
8831  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8832  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8833 
8834  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8835  VMA_VALIDATE(!suballocations1st.empty() ||
8836  suballocations2nd.empty() ||
8837  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8838 
8839  if(!suballocations1st.empty())
8840  {
8841  // A null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
8842  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8843  // Null item at the end should be just pop_back().
8844  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8845  }
8846  if(!suballocations2nd.empty())
8847  {
8848  // Null item at the end should be just pop_back().
8849  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8850  }
8851 
8852  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8853  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8854 
8855  VkDeviceSize sumUsedSize = 0;
8856  const size_t suballoc1stCount = suballocations1st.size();
8857  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8858 
8859  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8860  {
8861  const size_t suballoc2ndCount = suballocations2nd.size();
8862  size_t nullItem2ndCount = 0;
8863  for(size_t i = 0; i < suballoc2ndCount; ++i)
8864  {
8865  const VmaSuballocation& suballoc = suballocations2nd[i];
8866  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8867 
8868  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8869  VMA_VALIDATE(suballoc.offset >= offset);
8870 
8871  if(!currFree)
8872  {
8873  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8874  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8875  sumUsedSize += suballoc.size;
8876  }
8877  else
8878  {
8879  ++nullItem2ndCount;
8880  }
8881 
8882  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8883  }
8884 
8885  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8886  }
8887 
8888  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8889  {
8890  const VmaSuballocation& suballoc = suballocations1st[i];
8891  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8892  suballoc.hAllocation == VK_NULL_HANDLE);
8893  }
8894 
8895  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8896 
8897  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8898  {
8899  const VmaSuballocation& suballoc = suballocations1st[i];
8900  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8901 
8902  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8903  VMA_VALIDATE(suballoc.offset >= offset);
8904  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8905 
8906  if(!currFree)
8907  {
8908  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8909  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8910  sumUsedSize += suballoc.size;
8911  }
8912  else
8913  {
8914  ++nullItem1stCount;
8915  }
8916 
8917  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8918  }
8919  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8920 
8921  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8922  {
8923  const size_t suballoc2ndCount = suballocations2nd.size();
8924  size_t nullItem2ndCount = 0;
8925  for(size_t i = suballoc2ndCount; i--; )
8926  {
8927  const VmaSuballocation& suballoc = suballocations2nd[i];
8928  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8929 
8930  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8931  VMA_VALIDATE(suballoc.offset >= offset);
8932 
8933  if(!currFree)
8934  {
8935  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8936  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8937  sumUsedSize += suballoc.size;
8938  }
8939  else
8940  {
8941  ++nullItem2ndCount;
8942  }
8943 
8944  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8945  }
8946 
8947  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8948  }
8949 
8950  VMA_VALIDATE(offset <= GetSize());
8951  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8952 
8953  return true;
8954 }
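
// [Illustrative sketch] The null-item bookkeeping that Validate() checks:
// items freed at the beginning of the 1st vector are counted in a "begin"
// counter, later holes in a "middle" counter, and trailing nulls are popped
// eagerly, so the two counters must equal the number of null entries. A toy
// model with 0 = null (freed), 1 = live allocation:
#if 0 // Example only; excluded from compilation.
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> subs1st{ 0, 0, 1, 0, 1, 1 };
    const size_t beginNulls = 2, middleNulls = 1;

    size_t nulls = 0;
    for(int s : subs1st)
    {
        if(s == 0) ++nulls;
    }
    assert(nulls == beginNulls + middleNulls);
    assert(subs1st[beginNulls] == 1); // First item past the begin-run is live.
    assert(subs1st.back() == 1);      // No trailing nulls.
    return 0;
}
#endif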
8955 
8956 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8957 {
8958  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8959  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8960 }
8961 
8962 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8963 {
8964  const VkDeviceSize size = GetSize();
8965 
8966  /*
8967  We don't consider gaps inside allocation vectors with freed allocations because
8968  they are not suitable for reuse in the linear allocator. We consider only space that
8969  is available for new allocations.
8970  */
8971  if(IsEmpty())
8972  {
8973  return size;
8974  }
8975 
8976  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8977 
8978  switch(m_2ndVectorMode)
8979  {
8980  case SECOND_VECTOR_EMPTY:
8981  /*
8982  Available space is after end of 1st, as well as before beginning of 1st (which
8983  would make it a ring buffer).
8984  */
8985  {
8986  const size_t suballocations1stCount = suballocations1st.size();
8987  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8988  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8989  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8990  return VMA_MAX(
8991  firstSuballoc.offset,
8992  size - (lastSuballoc.offset + lastSuballoc.size));
8993  }
8994  break;
8995 
8996  case SECOND_VECTOR_RING_BUFFER:
8997  /*
8998  Available space is only between end of 2nd and beginning of 1st.
8999  */
9000  {
9001  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9002  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9003  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9004  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9005  }
9006  break;
9007 
9008  case SECOND_VECTOR_DOUBLE_STACK:
9009  /*
9010  Available space is only between end of 1st and top of 2nd.
9011  */
9012  {
9013  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9014  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9015  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9016  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9017  }
9018  break;
9019 
9020  default:
9021  VMA_ASSERT(0);
9022  return 0;
9023  }
9024 }
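
// [Illustrative sketch] The three cases above, reduced to arithmetic on
// made-up offsets in a block of size 1000:
#if 0 // Example only; excluded from compilation.
#include <algorithm>
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t size = 1000;

    // SECOND_VECTOR_EMPTY: space before the 1st vector or after its end.
    {
        const uint64_t first1stOffset = 100;                   // 1st begins here.
        const uint64_t last1stOffset = 600, last1stSize = 100; // 1st ends at 700.
        assert(std::max(first1stOffset, size - (last1stOffset + last1stSize)) == 300);
    }
    // SECOND_VECTOR_RING_BUFFER: gap between end of 2nd and start of 1st.
    {
        const uint64_t end2nd = 250, start1st = 400;
        assert(start1st - end2nd == 150);
    }
    // SECOND_VECTOR_DOUBLE_STACK: gap between end of 1st and top of 2nd.
    {
        const uint64_t end1st = 300, top2nd = 800;
        assert(top2nd - end1st == 500);
    }
    return 0;
}
#endif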
9025 
9026 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9027 {
9028  const VkDeviceSize size = GetSize();
9029  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9030  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9031  const size_t suballoc1stCount = suballocations1st.size();
9032  const size_t suballoc2ndCount = suballocations2nd.size();
9033 
9034  outInfo.blockCount = 1;
9035  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9036  outInfo.unusedRangeCount = 0;
9037  outInfo.usedBytes = 0;
9038  outInfo.allocationSizeMin = UINT64_MAX;
9039  outInfo.allocationSizeMax = 0;
9040  outInfo.unusedRangeSizeMin = UINT64_MAX;
9041  outInfo.unusedRangeSizeMax = 0;
9042 
9043  VkDeviceSize lastOffset = 0;
9044 
9045  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9046  {
9047  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9048  size_t nextAlloc2ndIndex = 0;
9049  while(lastOffset < freeSpace2ndTo1stEnd)
9050  {
9051  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9052  while(nextAlloc2ndIndex < suballoc2ndCount &&
9053  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9054  {
9055  ++nextAlloc2ndIndex;
9056  }
9057 
9058  // Found non-null allocation.
9059  if(nextAlloc2ndIndex < suballoc2ndCount)
9060  {
9061  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9062 
9063  // 1. Process free space before this allocation.
9064  if(lastOffset < suballoc.offset)
9065  {
9066  // There is free space from lastOffset to suballoc.offset.
9067  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9068  ++outInfo.unusedRangeCount;
9069  outInfo.unusedBytes += unusedRangeSize;
9070  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9071  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9072  }
9073 
9074  // 2. Process this allocation.
9075  // There is allocation with suballoc.offset, suballoc.size.
9076  outInfo.usedBytes += suballoc.size;
9077  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9078  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9079 
9080  // 3. Prepare for next iteration.
9081  lastOffset = suballoc.offset + suballoc.size;
9082  ++nextAlloc2ndIndex;
9083  }
9084  // We are at the end.
9085  else
9086  {
9087  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9088  if(lastOffset < freeSpace2ndTo1stEnd)
9089  {
9090  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9091  ++outInfo.unusedRangeCount;
9092  outInfo.unusedBytes += unusedRangeSize;
9093  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9094  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9095  }
9096 
9097  // End of loop.
9098  lastOffset = freeSpace2ndTo1stEnd;
9099  }
9100  }
9101  }
9102 
9103  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9104  const VkDeviceSize freeSpace1stTo2ndEnd =
9105  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9106  while(lastOffset < freeSpace1stTo2ndEnd)
9107  {
9108  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9109  while(nextAlloc1stIndex < suballoc1stCount &&
9110  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9111  {
9112  ++nextAlloc1stIndex;
9113  }
9114 
9115  // Found non-null allocation.
9116  if(nextAlloc1stIndex < suballoc1stCount)
9117  {
9118  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9119 
9120  // 1. Process free space before this allocation.
9121  if(lastOffset < suballoc.offset)
9122  {
9123  // There is free space from lastOffset to suballoc.offset.
9124  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9125  ++outInfo.unusedRangeCount;
9126  outInfo.unusedBytes += unusedRangeSize;
9127  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9128  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9129  }
9130 
9131  // 2. Process this allocation.
9132  // There is allocation with suballoc.offset, suballoc.size.
9133  outInfo.usedBytes += suballoc.size;
9134  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9135  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9136 
9137  // 3. Prepare for next iteration.
9138  lastOffset = suballoc.offset + suballoc.size;
9139  ++nextAlloc1stIndex;
9140  }
9141  // We are at the end.
9142  else
9143  {
9144  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9145  if(lastOffset < freeSpace1stTo2ndEnd)
9146  {
9147  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9148  ++outInfo.unusedRangeCount;
9149  outInfo.unusedBytes += unusedRangeSize;
9150  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9151  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9152  }
9153 
9154  // End of loop.
9155  lastOffset = freeSpace1stTo2ndEnd;
9156  }
9157  }
9158 
9159  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9160  {
9161  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9162  while(lastOffset < size)
9163  {
9164  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9165  while(nextAlloc2ndIndex != SIZE_MAX &&
9166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9167  {
9168  --nextAlloc2ndIndex;
9169  }
9170 
9171  // Found non-null allocation.
9172  if(nextAlloc2ndIndex != SIZE_MAX)
9173  {
9174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9175 
9176  // 1. Process free space before this allocation.
9177  if(lastOffset < suballoc.offset)
9178  {
9179  // There is free space from lastOffset to suballoc.offset.
9180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9181  ++outInfo.unusedRangeCount;
9182  outInfo.unusedBytes += unusedRangeSize;
9183  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9184  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9185  }
9186 
9187  // 2. Process this allocation.
9188  // There is allocation with suballoc.offset, suballoc.size.
9189  outInfo.usedBytes += suballoc.size;
9190  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9191  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9192 
9193  // 3. Prepare for next iteration.
9194  lastOffset = suballoc.offset + suballoc.size;
9195  --nextAlloc2ndIndex;
9196  }
9197  // We are at the end.
9198  else
9199  {
9200  // There is free space from lastOffset to size.
9201  if(lastOffset < size)
9202  {
9203  const VkDeviceSize unusedRangeSize = size - lastOffset;
9204  ++outInfo.unusedRangeCount;
9205  outInfo.unusedBytes += unusedRangeSize;
9206  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9207  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9208  }
9209 
9210  // End of loop.
9211  lastOffset = size;
9212  }
9213  }
9214  }
9215 
9216  outInfo.unusedBytes = size - outInfo.usedBytes;
9217 }
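
// [Illustrative sketch] The statistics above fold every range into running
// min/max values seeded with UINT64_MAX and 0 respectively, so the min fields
// must fold with a minimum and the max fields with a maximum (cf. the VMA_MAX
// calls above). The bare pattern:
#if 0 // Example only; excluded from compilation.
#include <algorithm>
#include <cassert>
#include <cstdint>

int main()
{
    uint64_t sizeMin = UINT64_MAX, sizeMax = 0;
    const uint64_t sizes[] = { 40, 10, 25 };
    for(uint64_t s : sizes)
    {
        sizeMin = std::min(sizeMin, s);
        sizeMax = std::max(sizeMax, s);
    }
    assert(sizeMin == 10 && sizeMax == 40);
    return 0;
}
#endif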
9218 
9219 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9220 {
9221  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9222  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9223  const VkDeviceSize size = GetSize();
9224  const size_t suballoc1stCount = suballocations1st.size();
9225  const size_t suballoc2ndCount = suballocations2nd.size();
9226 
9227  inoutStats.size += size;
9228 
9229  VkDeviceSize lastOffset = 0;
9230 
9231  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9232  {
9233  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9234  size_t nextAlloc2ndIndex = 0; // Index into suballocations2nd, so it must start at 0, not m_1stNullItemsBeginCount.
9235  while(lastOffset < freeSpace2ndTo1stEnd)
9236  {
9237  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9238  while(nextAlloc2ndIndex < suballoc2ndCount &&
9239  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9240  {
9241  ++nextAlloc2ndIndex;
9242  }
9243 
9244  // Found non-null allocation.
9245  if(nextAlloc2ndIndex < suballoc2ndCount)
9246  {
9247  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9248 
9249  // 1. Process free space before this allocation.
9250  if(lastOffset < suballoc.offset)
9251  {
9252  // There is free space from lastOffset to suballoc.offset.
9253  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9254  inoutStats.unusedSize += unusedRangeSize;
9255  ++inoutStats.unusedRangeCount;
9256  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9257  }
9258 
9259  // 2. Process this allocation.
9260  // There is allocation with suballoc.offset, suballoc.size.
9261  ++inoutStats.allocationCount;
9262 
9263  // 3. Prepare for next iteration.
9264  lastOffset = suballoc.offset + suballoc.size;
9265  ++nextAlloc2ndIndex;
9266  }
9267  // We are at the end.
9268  else
9269  {
9270  if(lastOffset < freeSpace2ndTo1stEnd)
9271  {
9272  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9273  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9274  inoutStats.unusedSize += unusedRangeSize;
9275  ++inoutStats.unusedRangeCount;
9276  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9277  }
9278 
9279  // End of loop.
9280  lastOffset = freeSpace2ndTo1stEnd;
9281  }
9282  }
9283  }
9284 
9285  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9286  const VkDeviceSize freeSpace1stTo2ndEnd =
9287  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9288  while(lastOffset < freeSpace1stTo2ndEnd)
9289  {
9290  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9291  while(nextAlloc1stIndex < suballoc1stCount &&
9292  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9293  {
9294  ++nextAlloc1stIndex;
9295  }
9296 
9297  // Found non-null allocation.
9298  if(nextAlloc1stIndex < suballoc1stCount)
9299  {
9300  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9301 
9302  // 1. Process free space before this allocation.
9303  if(lastOffset < suballoc.offset)
9304  {
9305  // There is free space from lastOffset to suballoc.offset.
9306  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9307  inoutStats.unusedSize += unusedRangeSize;
9308  ++inoutStats.unusedRangeCount;
9309  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9310  }
9311 
9312  // 2. Process this allocation.
9313  // There is allocation with suballoc.offset, suballoc.size.
9314  ++inoutStats.allocationCount;
9315 
9316  // 3. Prepare for next iteration.
9317  lastOffset = suballoc.offset + suballoc.size;
9318  ++nextAlloc1stIndex;
9319  }
9320  // We are at the end.
9321  else
9322  {
9323  if(lastOffset < freeSpace1stTo2ndEnd)
9324  {
9325  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9326  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9327  inoutStats.unusedSize += unusedRangeSize;
9328  ++inoutStats.unusedRangeCount;
9329  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9330  }
9331 
9332  // End of loop.
9333  lastOffset = freeSpace1stTo2ndEnd;
9334  }
9335  }
9336 
9337  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9338  {
9339  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9340  while(lastOffset < size)
9341  {
9342  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9343  while(nextAlloc2ndIndex != SIZE_MAX &&
9344  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9345  {
9346  --nextAlloc2ndIndex;
9347  }
9348 
9349  // Found non-null allocation.
9350  if(nextAlloc2ndIndex != SIZE_MAX)
9351  {
9352  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9353 
9354  // 1. Process free space before this allocation.
9355  if(lastOffset < suballoc.offset)
9356  {
9357  // There is free space from lastOffset to suballoc.offset.
9358  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9359  inoutStats.unusedSize += unusedRangeSize;
9360  ++inoutStats.unusedRangeCount;
9361  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9362  }
9363 
9364  // 2. Process this allocation.
9365  // There is allocation with suballoc.offset, suballoc.size.
9366  ++inoutStats.allocationCount;
9367 
9368  // 3. Prepare for next iteration.
9369  lastOffset = suballoc.offset + suballoc.size;
9370  --nextAlloc2ndIndex;
9371  }
9372  // We are at the end.
9373  else
9374  {
9375  if(lastOffset < size)
9376  {
9377  // There is free space from lastOffset to size.
9378  const VkDeviceSize unusedRangeSize = size - lastOffset;
9379  inoutStats.unusedSize += unusedRangeSize;
9380  ++inoutStats.unusedRangeCount;
9381  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9382  }
9383 
9384  // End of loop.
9385  lastOffset = size;
9386  }
9387  }
9388  }
9389 }
9390 
9391 #if VMA_STATS_STRING_ENABLED
9392 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9393 {
9394  const VkDeviceSize size = GetSize();
9395  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9396  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9397  const size_t suballoc1stCount = suballocations1st.size();
9398  const size_t suballoc2ndCount = suballocations2nd.size();
9399 
9400  // FIRST PASS
9401 
9402  size_t unusedRangeCount = 0;
9403  VkDeviceSize usedBytes = 0;
9404 
9405  VkDeviceSize lastOffset = 0;
9406 
9407  size_t alloc2ndCount = 0;
9408  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9409  {
9410  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9411  size_t nextAlloc2ndIndex = 0;
9412  while(lastOffset < freeSpace2ndTo1stEnd)
9413  {
9414  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9415  while(nextAlloc2ndIndex < suballoc2ndCount &&
9416  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9417  {
9418  ++nextAlloc2ndIndex;
9419  }
9420 
9421  // Found non-null allocation.
9422  if(nextAlloc2ndIndex < suballoc2ndCount)
9423  {
9424  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9425 
9426  // 1. Process free space before this allocation.
9427  if(lastOffset < suballoc.offset)
9428  {
9429  // There is free space from lastOffset to suballoc.offset.
9430  ++unusedRangeCount;
9431  }
9432 
9433  // 2. Process this allocation.
9434  // There is allocation with suballoc.offset, suballoc.size.
9435  ++alloc2ndCount;
9436  usedBytes += suballoc.size;
9437 
9438  // 3. Prepare for next iteration.
9439  lastOffset = suballoc.offset + suballoc.size;
9440  ++nextAlloc2ndIndex;
9441  }
9442  // We are at the end.
9443  else
9444  {
9445  if(lastOffset < freeSpace2ndTo1stEnd)
9446  {
9447  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9448  ++unusedRangeCount;
9449  }
9450 
9451  // End of loop.
9452  lastOffset = freeSpace2ndTo1stEnd;
9453  }
9454  }
9455  }
9456 
9457  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9458  size_t alloc1stCount = 0;
9459  const VkDeviceSize freeSpace1stTo2ndEnd =
9460  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9461  while(lastOffset < freeSpace1stTo2ndEnd)
9462  {
9463  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9464  while(nextAlloc1stIndex < suballoc1stCount &&
9465  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9466  {
9467  ++nextAlloc1stIndex;
9468  }
9469 
9470  // Found non-null allocation.
9471  if(nextAlloc1stIndex < suballoc1stCount)
9472  {
9473  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9474 
9475  // 1. Process free space before this allocation.
9476  if(lastOffset < suballoc.offset)
9477  {
9478  // There is free space from lastOffset to suballoc.offset.
9479  ++unusedRangeCount;
9480  }
9481 
9482  // 2. Process this allocation.
9483  // There is allocation with suballoc.offset, suballoc.size.
9484  ++alloc1stCount;
9485  usedBytes += suballoc.size;
9486 
9487  // 3. Prepare for next iteration.
9488  lastOffset = suballoc.offset + suballoc.size;
9489  ++nextAlloc1stIndex;
9490  }
9491  // We are at the end.
9492  else
9493  {
9494  if(lastOffset < freeSpace1stTo2ndEnd)
9495  {
9496  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9497  ++unusedRangeCount;
9498  }
9499 
9500  // End of loop.
9501  lastOffset = freeSpace1stTo2ndEnd;
9502  }
9503  }
9504 
9505  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9506  {
9507  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9508  while(lastOffset < size)
9509  {
9510  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9511  while(nextAlloc2ndIndex != SIZE_MAX &&
9512  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9513  {
9514  --nextAlloc2ndIndex;
9515  }
9516 
9517  // Found non-null allocation.
9518  if(nextAlloc2ndIndex != SIZE_MAX)
9519  {
9520  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9521 
9522  // 1. Process free space before this allocation.
9523  if(lastOffset < suballoc.offset)
9524  {
9525  // There is free space from lastOffset to suballoc.offset.
9526  ++unusedRangeCount;
9527  }
9528 
9529  // 2. Process this allocation.
9530  // There is allocation with suballoc.offset, suballoc.size.
9531  ++alloc2ndCount;
9532  usedBytes += suballoc.size;
9533 
9534  // 3. Prepare for next iteration.
9535  lastOffset = suballoc.offset + suballoc.size;
9536  --nextAlloc2ndIndex;
9537  }
9538  // We are at the end.
9539  else
9540  {
9541  if(lastOffset < size)
9542  {
9543  // There is free space from lastOffset to size.
9544  ++unusedRangeCount;
9545  }
9546 
9547  // End of loop.
9548  lastOffset = size;
9549  }
9550  }
9551  }
9552 
9553  const VkDeviceSize unusedBytes = size - usedBytes;
9554  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9555 
9556  // SECOND PASS
9557  lastOffset = 0;
9558 
9559  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9560  {
9561  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9562  size_t nextAlloc2ndIndex = 0;
9563  while(lastOffset < freeSpace2ndTo1stEnd)
9564  {
9565  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9566  while(nextAlloc2ndIndex < suballoc2ndCount &&
9567  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9568  {
9569  ++nextAlloc2ndIndex;
9570  }
9571 
9572  // Found non-null allocation.
9573  if(nextAlloc2ndIndex < suballoc2ndCount)
9574  {
9575  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9576 
9577  // 1. Process free space before this allocation.
9578  if(lastOffset < suballoc.offset)
9579  {
9580  // There is free space from lastOffset to suballoc.offset.
9581  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9582  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9583  }
9584 
9585  // 2. Process this allocation.
9586  // There is allocation with suballoc.offset, suballoc.size.
9587  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9588 
9589  // 3. Prepare for next iteration.
9590  lastOffset = suballoc.offset + suballoc.size;
9591  ++nextAlloc2ndIndex;
9592  }
9593  // We are at the end.
9594  else
9595  {
9596  if(lastOffset < freeSpace2ndTo1stEnd)
9597  {
9598  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9599  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9600  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9601  }
9602 
9603  // End of loop.
9604  lastOffset = freeSpace2ndTo1stEnd;
9605  }
9606  }
9607  }
9608 
9609  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9610  while(lastOffset < freeSpace1stTo2ndEnd)
9611  {
9612  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9613  while(nextAlloc1stIndex < suballoc1stCount &&
9614  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9615  {
9616  ++nextAlloc1stIndex;
9617  }
9618 
9619  // Found non-null allocation.
9620  if(nextAlloc1stIndex < suballoc1stCount)
9621  {
9622  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9623 
9624  // 1. Process free space before this allocation.
9625  if(lastOffset < suballoc.offset)
9626  {
9627  // There is free space from lastOffset to suballoc.offset.
9628  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9629  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9630  }
9631 
9632  // 2. Process this allocation.
9633  // There is allocation with suballoc.offset, suballoc.size.
9634  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9635 
9636  // 3. Prepare for next iteration.
9637  lastOffset = suballoc.offset + suballoc.size;
9638  ++nextAlloc1stIndex;
9639  }
9640  // We are at the end.
9641  else
9642  {
9643  if(lastOffset < freeSpace1stTo2ndEnd)
9644  {
9645  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9646  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9647  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9648  }
9649 
9650  // End of loop.
9651  lastOffset = freeSpace1stTo2ndEnd;
9652  }
9653  }
9654 
9655  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9656  {
9657  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9658  while(lastOffset < size)
9659  {
9660  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9661  while(nextAlloc2ndIndex != SIZE_MAX &&
9662  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9663  {
9664  --nextAlloc2ndIndex;
9665  }
9666 
9667  // Found non-null allocation.
9668  if(nextAlloc2ndIndex != SIZE_MAX)
9669  {
9670  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9671 
9672  // 1. Process free space before this allocation.
9673  if(lastOffset < suballoc.offset)
9674  {
9675  // There is free space from lastOffset to suballoc.offset.
9676  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9677  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9678  }
9679 
9680  // 2. Process this allocation.
9681  // There is allocation with suballoc.offset, suballoc.size.
9682  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9683 
9684  // 3. Prepare for next iteration.
9685  lastOffset = suballoc.offset + suballoc.size;
9686  --nextAlloc2ndIndex;
9687  }
9688  // We are at the end.
9689  else
9690  {
9691  if(lastOffset < size)
9692  {
9693  // There is free space from lastOffset to size.
9694  const VkDeviceSize unusedRangeSize = size - lastOffset;
9695  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9696  }
9697 
9698  // End of loop.
9699  lastOffset = size;
9700  }
9701  }
9702  }
9703 
9704  PrintDetailedMap_End(json);
9705 }
9706 #endif // #if VMA_STATS_STRING_ENABLED
9707 
9708 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9709  uint32_t currentFrameIndex,
9710  uint32_t frameInUseCount,
9711  VkDeviceSize bufferImageGranularity,
9712  VkDeviceSize allocSize,
9713  VkDeviceSize allocAlignment,
9714  bool upperAddress,
9715  VmaSuballocationType allocType,
9716  bool canMakeOtherLost,
9717  uint32_t strategy,
9718  VmaAllocationRequest* pAllocationRequest)
9719 {
9720  VMA_ASSERT(allocSize > 0);
9721  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9722  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9723  VMA_HEAVY_ASSERT(Validate());
9724  return upperAddress ?
9725  CreateAllocationRequest_UpperAddress(
9726  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9727  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9728  CreateAllocationRequest_LowerAddress(
9729  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9730  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9731 }
9732 
9733 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9734  uint32_t currentFrameIndex,
9735  uint32_t frameInUseCount,
9736  VkDeviceSize bufferImageGranularity,
9737  VkDeviceSize allocSize,
9738  VkDeviceSize allocAlignment,
9739  VmaSuballocationType allocType,
9740  bool canMakeOtherLost,
9741  uint32_t strategy,
9742  VmaAllocationRequest* pAllocationRequest)
9743 {
9744  const VkDeviceSize size = GetSize();
9745  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9746  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9747 
9748  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9749  {
9750  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9751  return false;
9752  }
9753 
9754  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9755  if(allocSize > size)
9756  {
9757  return false;
9758  }
9759  VkDeviceSize resultBaseOffset = size - allocSize;
9760  if(!suballocations2nd.empty())
9761  {
9762  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9763  resultBaseOffset = lastSuballoc.offset - allocSize;
9764  if(allocSize > lastSuballoc.offset)
9765  {
9766  return false;
9767  }
9768  }
9769 
9770  // Start from offset equal to end of free space.
9771  VkDeviceSize resultOffset = resultBaseOffset;
9772 
9773  // Apply VMA_DEBUG_MARGIN at the end.
9774  if(VMA_DEBUG_MARGIN > 0)
9775  {
9776  if(resultOffset < VMA_DEBUG_MARGIN)
9777  {
9778  return false;
9779  }
9780  resultOffset -= VMA_DEBUG_MARGIN;
9781  }
9782 
9783  // Apply alignment.
9784  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9785 
9786  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9787  // Make bigger alignment if necessary.
9788  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9789  {
9790  bool bufferImageGranularityConflict = false;
9791  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9792  {
9793  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9794  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9795  {
9796  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9797  {
9798  bufferImageGranularityConflict = true;
9799  break;
9800  }
9801  }
9802  else
9803  // Already on next page (these 2nd-vector neighbours lie above resultOffset).
9804  break;
9805  }
9806  if(bufferImageGranularityConflict)
9807  {
9808  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9809  }
9810  }
9811 
9812  // There is enough free space.
9813  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9814  suballocations1st.back().offset + suballocations1st.back().size :
9815  0;
9816  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9817  {
9818  // Check previous suballocations for BufferImageGranularity conflicts.
9819  // If conflict exists, allocation cannot be made here.
9820  if(bufferImageGranularity > 1)
9821  {
9822  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9823  {
9824  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9825  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9826  {
9827  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9828  {
9829  return false;
9830  }
9831  }
9832  else
9833  {
9834  // Already on previous page (these 1st-vector neighbours lie below resultOffset).
9835  break;
9836  }
9837  }
9838  }
9839 
9840  // All tests passed: Success.
9841  pAllocationRequest->offset = resultOffset;
9842  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9843  pAllocationRequest->sumItemSize = 0;
9844  // pAllocationRequest->item unused.
9845  pAllocationRequest->itemsToMakeLostCount = 0;
9846  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9847  return true;
9848  }
9849 
9850  return false;
9851 }
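
// [Illustrative sketch] Upper-address allocation works top-down: start at
// (top - allocSize), subtract the debug margin, then align *down*. The same
// arithmetic on made-up numbers (block size 1000):
#if 0 // Example only; excluded from compilation.
#include <cassert>
#include <cstdint>

static uint64_t AlignDownEx(uint64_t v, uint64_t a) { return v / a * a; }

int main()
{
    const uint64_t top = 1000, allocSize = 100, margin = 16, alignment = 64;
    uint64_t offset = top - allocSize; // 900
    offset -= margin;                  // 884
    offset = AlignDownEx(offset, alignment);
    assert(offset == 832);
    return 0;
}
#endif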
9852 
9853 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9854  uint32_t currentFrameIndex,
9855  uint32_t frameInUseCount,
9856  VkDeviceSize bufferImageGranularity,
9857  VkDeviceSize allocSize,
9858  VkDeviceSize allocAlignment,
9859  VmaSuballocationType allocType,
9860  bool canMakeOtherLost,
9861  uint32_t strategy,
9862  VmaAllocationRequest* pAllocationRequest)
9863 {
9864  const VkDeviceSize size = GetSize();
9865  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9866  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9867 
9868  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9869  {
9870  // Try to allocate at the end of 1st vector.
9871 
9872  VkDeviceSize resultBaseOffset = 0;
9873  if(!suballocations1st.empty())
9874  {
9875  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9876  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9877  }
9878 
9879  // Start from offset equal to beginning of free space.
9880  VkDeviceSize resultOffset = resultBaseOffset;
9881 
9882  // Apply VMA_DEBUG_MARGIN at the beginning.
9883  if(VMA_DEBUG_MARGIN > 0)
9884  {
9885  resultOffset += VMA_DEBUG_MARGIN;
9886  }
9887 
9888  // Apply alignment.
9889  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9890 
9891  // Check previous suballocations for BufferImageGranularity conflicts.
9892  // Make bigger alignment if necessary.
9893  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9894  {
9895  bool bufferImageGranularityConflict = false;
9896  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9897  {
9898  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9899  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9900  {
9901  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9902  {
9903  bufferImageGranularityConflict = true;
9904  break;
9905  }
9906  }
9907  else
9908  // Already on previous page.
9909  break;
9910  }
9911  if(bufferImageGranularityConflict)
9912  {
9913  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9914  }
9915  }
9916 
9917  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9918  suballocations2nd.back().offset : size;
9919 
9920  // There is enough free space at the end after alignment.
9921  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9922  {
9923  // Check next suballocations for BufferImageGranularity conflicts.
9924  // If conflict exists, allocation cannot be made here.
9925  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9926  {
9927  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9928  {
9929  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9930  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9931  {
9932  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9933  {
9934  return false;
9935  }
9936  }
9937  else
9938  {
9939  // Already on next page (these 2nd-vector neighbours lie above resultOffset).
9940  break;
9941  }
9942  }
9943  }
9944 
9945  // All tests passed: Success.
9946  pAllocationRequest->offset = resultOffset;
9947  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9948  pAllocationRequest->sumItemSize = 0;
9949  // pAllocationRequest->item, customData unused.
9950  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9951  pAllocationRequest->itemsToMakeLostCount = 0;
9952  return true;
9953  }
9954  }
9955 
9956  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9957  // beginning of 1st vector as the end of free space.
9958  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9959  {
9960  VMA_ASSERT(!suballocations1st.empty());
9961 
9962  VkDeviceSize resultBaseOffset = 0;
9963  if(!suballocations2nd.empty())
9964  {
9965  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9966  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9967  }
9968 
9969  // Start from offset equal to beginning of free space.
9970  VkDeviceSize resultOffset = resultBaseOffset;
9971 
9972  // Apply VMA_DEBUG_MARGIN at the beginning.
9973  if(VMA_DEBUG_MARGIN > 0)
9974  {
9975  resultOffset += VMA_DEBUG_MARGIN;
9976  }
9977 
9978  // Apply alignment.
9979  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9980 
9981  // Check previous suballocations for BufferImageGranularity conflicts.
9982  // Make bigger alignment if necessary.
9983  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9984  {
9985  bool bufferImageGranularityConflict = false;
9986  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9987  {
9988  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9989  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9990  {
9991  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9992  {
9993  bufferImageGranularityConflict = true;
9994  break;
9995  }
9996  }
9997  else
9998  // Already on previous page.
9999  break;
10000  }
10001  if(bufferImageGranularityConflict)
10002  {
10003  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10004  }
10005  }
10006 
10007  pAllocationRequest->itemsToMakeLostCount = 0;
10008  pAllocationRequest->sumItemSize = 0;
10009  size_t index1st = m_1stNullItemsBeginCount;
10010 
10011  if(canMakeOtherLost)
10012  {
10013  while(index1st < suballocations1st.size() &&
10014  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10015  {
10016  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10017  const VmaSuballocation& suballoc = suballocations1st[index1st];
10018  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10019  {
10020  // No problem.
10021  }
10022  else
10023  {
10024  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10025  if(suballoc.hAllocation->CanBecomeLost() &&
10026  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10027  {
10028  ++pAllocationRequest->itemsToMakeLostCount;
10029  pAllocationRequest->sumItemSize += suballoc.size;
10030  }
10031  else
10032  {
10033  return false;
10034  }
10035  }
10036  ++index1st;
10037  }
10038 
10039  // Check next suballocations for BufferImageGranularity conflicts.
10040  // If conflict exists, we must mark more allocations lost or fail.
10041  if(bufferImageGranularity > 1)
10042  {
10043  while(index1st < suballocations1st.size())
10044  {
10045  const VmaSuballocation& suballoc = suballocations1st[index1st];
10046  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10047  {
10048  if(suballoc.hAllocation != VK_NULL_HANDLE)
10049  {
10050  // Conservative: not checking the actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type) - any allocation on the same page is treated as a conflict.
10051  if(suballoc.hAllocation->CanBecomeLost() &&
10052  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10053  {
10054  ++pAllocationRequest->itemsToMakeLostCount;
10055  pAllocationRequest->sumItemSize += suballoc.size;
10056  }
10057  else
10058  {
10059  return false;
10060  }
10061  }
10062  }
10063  else
10064  {
10065  // Already on next page.
10066  break;
10067  }
10068  ++index1st;
10069  }
10070  }
10071 
10072  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10073  if(index1st == suballocations1st.size() &&
10074  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10075  {
10076  // TODO: Known limitation - this case is not implemented yet, so the allocation fails.
10077  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10078  }
10079  }
10080 
10081  // There is enough free space at the end after alignment.
10082  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10083  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10084  {
10085  // Check next suballocations for BufferImageGranularity conflicts.
10086  // If conflict exists, allocation cannot be made here.
10087  if(bufferImageGranularity > 1)
10088  {
10089  for(size_t nextSuballocIndex = index1st;
10090  nextSuballocIndex < suballocations1st.size();
10091  nextSuballocIndex++)
10092  {
10093  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10094  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10095  {
10096  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10097  {
10098  return false;
10099  }
10100  }
10101  else
10102  {
10103  // Already on next page.
10104  break;
10105  }
10106  }
10107  }
10108 
10109  // All tests passed: Success.
10110  pAllocationRequest->offset = resultOffset;
10111  pAllocationRequest->sumFreeSize =
10112  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10113  - resultBaseOffset
10114  - pAllocationRequest->sumItemSize;
10115  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10116  // pAllocationRequest->item, customData unused.
10117  return true;
10118  }
10119  }
10120 
10121  return false;
10122 }
10123 
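// Note on "lost allocations": an allocation created with the CAN_BECOME_LOST flag
// may be reclaimed by the allocator once it has not been used for more than
// frameInUseCount frames. MakeRequestedAllocationsLost below walks live items
// starting at the beginning of 1st vector (wrapping into 2nd in ring-buffer mode)
// and frees them via MakeLost() until the number recorded in
// pAllocationRequest->itemsToMakeLostCount has been reclaimed.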
10124 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10125  uint32_t currentFrameIndex,
10126  uint32_t frameInUseCount,
10127  VmaAllocationRequest* pAllocationRequest)
10128 {
10129  if(pAllocationRequest->itemsToMakeLostCount == 0)
10130  {
10131  return true;
10132  }
10133 
10134  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10135 
10136  // We always start from 1st.
10137  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10138  size_t index = m_1stNullItemsBeginCount;
10139  size_t madeLostCount = 0;
10140  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10141  {
10142  if(index == suballocations->size())
10143  {
10144  index = 0;
10145  // When we reach the end of 1st, wrap around: to the beginning of 2nd if it is a ring buffer, otherwise back to the beginning of 1st.
10146  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10147  {
10148  suballocations = &AccessSuballocations2nd();
10149  }
10150  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10151  // suballocations continues pointing at AccessSuballocations1st().
10152  VMA_ASSERT(!suballocations->empty());
10153  }
10154  VmaSuballocation& suballoc = (*suballocations)[index];
10155  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10156  {
10157  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10158  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10159  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10160  {
10161  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10162  suballoc.hAllocation = VK_NULL_HANDLE;
10163  m_SumFreeSize += suballoc.size;
10164  if(suballocations == &AccessSuballocations1st())
10165  {
10166  ++m_1stNullItemsMiddleCount;
10167  }
10168  else
10169  {
10170  ++m_2ndNullItemsCount;
10171  }
10172  ++madeLostCount;
10173  }
10174  else
10175  {
10176  return false;
10177  }
10178  }
10179  ++index;
10180  }
10181 
10182  CleanupAfterFree();
10183  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10184 
10185  return true;
10186 }
10187 
10188 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10189 {
10190  uint32_t lostAllocationCount = 0;
10191 
10192  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10193  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10194  {
10195  VmaSuballocation& suballoc = suballocations1st[i];
10196  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10197  suballoc.hAllocation->CanBecomeLost() &&
10198  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10199  {
10200  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10201  suballoc.hAllocation = VK_NULL_HANDLE;
10202  ++m_1stNullItemsMiddleCount;
10203  m_SumFreeSize += suballoc.size;
10204  ++lostAllocationCount;
10205  }
10206  }
10207 
10208  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10209  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10210  {
10211  VmaSuballocation& suballoc = suballocations2nd[i];
10212  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10213  suballoc.hAllocation->CanBecomeLost() &&
10214  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10215  {
10216  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10217  suballoc.hAllocation = VK_NULL_HANDLE;
10218  ++m_2ndNullItemsCount;
10219  m_SumFreeSize += suballoc.size;
10220  ++lostAllocationCount;
10221  }
10222  }
10223 
10224  if(lostAllocationCount)
10225  {
10226  CleanupAfterFree();
10227  }
10228 
10229  return lostAllocationCount;
10230 }
10231 
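// CheckCorruption scans every live suballocation in both vectors and verifies the
// magic values written into the VMA_DEBUG_MARGIN bytes on both sides of it (see
// VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation further below). A mismatch
// means some code wrote outside the bounds of its allocation.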
10232 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10233 {
10234  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10235  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10236  {
10237  const VmaSuballocation& suballoc = suballocations1st[i];
10238  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10239  {
10240  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10241  {
10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10243  return VK_ERROR_VALIDATION_FAILED_EXT;
10244  }
10245  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10246  {
10247  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10248  return VK_ERROR_VALIDATION_FAILED_EXT;
10249  }
10250  }
10251  }
10252 
10253  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10254  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10255  {
10256  const VmaSuballocation& suballoc = suballocations2nd[i];
10257  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10258  {
10259  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10260  {
10261  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10262  return VK_ERROR_VALIDATION_FAILED_EXT;
10263  }
10264  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10265  {
10266  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10267  return VK_ERROR_VALIDATION_FAILED_EXT;
10268  }
10269  }
10270  }
10271 
10272  return VK_SUCCESS;
10273 }
10274 
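// Alloc commits a request computed earlier by CreateAllocationRequest. The linear
// metadata keeps two suballocation vectors: EndOf1st requests append to 1st,
// UpperAddress requests turn 2nd into an upper stack (double-stack mode), and
// EndOf2nd requests turn 2nd into the second part of a ring buffer. The two modes
// of the 2nd vector are mutually exclusive, which the asserts below enforce.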
10275 void VmaBlockMetadata_Linear::Alloc(
10276  const VmaAllocationRequest& request,
10277  VmaSuballocationType type,
10278  VkDeviceSize allocSize,
10279  VmaAllocation hAllocation)
10280 {
10281  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10282 
10283  switch(request.type)
10284  {
10285  case VmaAllocationRequestType::UpperAddress:
10286  {
10287  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10288  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10289  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10290  suballocations2nd.push_back(newSuballoc);
10291  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10292  }
10293  break;
10294  case VmaAllocationRequestType::EndOf1st:
10295  {
10296  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10297 
10298  VMA_ASSERT(suballocations1st.empty() ||
10299  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10300  // Check if it fits before the end of the block.
10301  VMA_ASSERT(request.offset + allocSize <= GetSize());
10302 
10303  suballocations1st.push_back(newSuballoc);
10304  }
10305  break;
10306  case VmaAllocationRequestType::EndOf2nd:
10307  {
10308  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10309  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10310  VMA_ASSERT(!suballocations1st.empty() &&
10311  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10312  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10313 
10314  switch(m_2ndVectorMode)
10315  {
10316  case SECOND_VECTOR_EMPTY:
10317  // First allocation from the second part of the ring buffer.
10318  VMA_ASSERT(suballocations2nd.empty());
10319  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10320  break;
10321  case SECOND_VECTOR_RING_BUFFER:
10322  // 2-part ring buffer is already started.
10323  VMA_ASSERT(!suballocations2nd.empty());
10324  break;
10325  case SECOND_VECTOR_DOUBLE_STACK:
10326  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10327  break;
10328  default:
10329  VMA_ASSERT(0);
10330  }
10331 
10332  suballocations2nd.push_back(newSuballoc);
10333  }
10334  break;
10335  default:
10336  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10337  }
10338 
10339  m_SumFreeSize -= newSuballoc.size;
10340 }
10341 
10342 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10343 {
10344  FreeAtOffset(allocation->GetOffset());
10345 }
10346 
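// Freeing handles the cheap cases first: the oldest item at the beginning of 1st
// vector and the newest item at the end of 2nd (or 1st) vector are released
// without searching. Anything else is located by binary search over the
// offset-sorted vectors and only marked as a null item; the actual trimming and
// compaction is done by CleanupAfterFree().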
10347 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10348 {
10349  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10350  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10351 
10352  if(!suballocations1st.empty())
10353  {
10354  // If this is the first live allocation in 1st vector: mark it free and extend the null prefix at the beginning.
10355  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10356  if(firstSuballoc.offset == offset)
10357  {
10358  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10359  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10360  m_SumFreeSize += firstSuballoc.size;
10361  ++m_1stNullItemsBeginCount;
10362  CleanupAfterFree();
10363  return;
10364  }
10365  }
10366 
10367  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10368  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10369  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10370  {
10371  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10372  if(lastSuballoc.offset == offset)
10373  {
10374  m_SumFreeSize += lastSuballoc.size;
10375  suballocations2nd.pop_back();
10376  CleanupAfterFree();
10377  return;
10378  }
10379  }
10380  // Last allocation in 1st vector.
10381  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10382  {
10383  VmaSuballocation& lastSuballoc = suballocations1st.back();
10384  if(lastSuballoc.offset == offset)
10385  {
10386  m_SumFreeSize += lastSuballoc.size;
10387  suballocations1st.pop_back();
10388  CleanupAfterFree();
10389  return;
10390  }
10391  }
10392 
10393  // Item from the middle of 1st vector.
10394  {
10395  VmaSuballocation refSuballoc;
10396  refSuballoc.offset = offset;
10397  // Rest of members intentionally left uninitialized for better performance.
10398  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10399  suballocations1st.begin() + m_1stNullItemsBeginCount,
10400  suballocations1st.end(),
10401  refSuballoc);
10402  if(it != suballocations1st.end())
10403  {
10404  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10405  it->hAllocation = VK_NULL_HANDLE;
10406  ++m_1stNullItemsMiddleCount;
10407  m_SumFreeSize += it->size;
10408  CleanupAfterFree();
10409  return;
10410  }
10411  }
10412 
10413  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10414  {
10415  // Item from the middle of 2nd vector.
10416  VmaSuballocation refSuballoc;
10417  refSuballoc.offset = offset;
10418  // Rest of members intentionally left uninitialized for better performance.
10419  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10420  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10421  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10422  if(it != suballocations2nd.end())
10423  {
10424  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10425  it->hAllocation = VK_NULL_HANDLE;
10426  ++m_2ndNullItemsCount;
10427  m_SumFreeSize += it->size;
10428  CleanupAfterFree();
10429  return;
10430  }
10431  }
10432 
10433  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10434 }
10435 
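// Compaction heuristic: compact 1st vector once it holds more than 32 items and
// null items outnumber live items by a factor of at least 1.5, i.e.
// nullItemCount * 2 >= (suballocCount - nullItemCount) * 3. For example, 25 null
// items out of 40 trigger compaction: 25*2 = 50 >= (40-25)*3 = 45.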
10436 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10437 {
10438  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10439  const size_t suballocCount = AccessSuballocations1st().size();
10440  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10441 }
10442 
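// CleanupAfterFree restores the invariants after any free: it trims null items
// from both ends of both vectors, compacts 1st vector when ShouldCompact1st()
// says so, and when 1st vector drains completely in ring-buffer mode it swaps
// the roles of the two vectors by flipping m_1stVectorIndex.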
10443 void VmaBlockMetadata_Linear::CleanupAfterFree()
10444 {
10445  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10446  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10447 
10448  if(IsEmpty())
10449  {
10450  suballocations1st.clear();
10451  suballocations2nd.clear();
10452  m_1stNullItemsBeginCount = 0;
10453  m_1stNullItemsMiddleCount = 0;
10454  m_2ndNullItemsCount = 0;
10455  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10456  }
10457  else
10458  {
10459  const size_t suballoc1stCount = suballocations1st.size();
10460  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10461  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10462 
10463  // Find more null items at the beginning of 1st vector.
10464  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10465  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10466  {
10467  ++m_1stNullItemsBeginCount;
10468  --m_1stNullItemsMiddleCount;
10469  }
10470 
10471  // Find more null items at the end of 1st vector.
10472  while(m_1stNullItemsMiddleCount > 0 &&
10473  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10474  {
10475  --m_1stNullItemsMiddleCount;
10476  suballocations1st.pop_back();
10477  }
10478 
10479  // Find more null items at the end of 2nd vector.
10480  while(m_2ndNullItemsCount > 0 &&
10481  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10482  {
10483  --m_2ndNullItemsCount;
10484  suballocations2nd.pop_back();
10485  }
10486 
10487  // Find more null items at the beginning of 2nd vector.
10488  while(m_2ndNullItemsCount > 0 &&
10489  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10490  {
10491  --m_2ndNullItemsCount;
10492  suballocations2nd.remove(0);
10493  }
10494 
10495  if(ShouldCompact1st())
10496  {
10497  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10498  size_t srcIndex = m_1stNullItemsBeginCount;
10499  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10500  {
10501  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10502  {
10503  ++srcIndex;
10504  }
10505  if(dstIndex != srcIndex)
10506  {
10507  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10508  }
10509  ++srcIndex;
10510  }
10511  suballocations1st.resize(nonNullItemCount);
10512  m_1stNullItemsBeginCount = 0;
10513  m_1stNullItemsMiddleCount = 0;
10514  }
10515 
10516  // 2nd vector became empty.
10517  if(suballocations2nd.empty())
10518  {
10519  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10520  }
10521 
10522  // 1st vector became empty.
10523  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10524  {
10525  suballocations1st.clear();
10526  m_1stNullItemsBeginCount = 0;
10527 
10528  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10529  {
10530  // Swap 1st with 2nd. Now 2nd is empty.
10531  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10532  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10533  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10534  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10535  {
10536  ++m_1stNullItemsBeginCount;
10537  --m_1stNullItemsMiddleCount;
10538  }
10539  m_2ndNullItemsCount = 0;
10540  m_1stVectorIndex ^= 1;
10541  }
10542  }
10543  }
10544 
10545  VMA_HEAVY_ASSERT(Validate());
10546 }
10547 
10548 
10549 ////////////////////////////////////////////////////////////////////////////////
10550 // class VmaBlockMetadata_Buddy
10551 
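// The buddy metadata manages a power-of-two region (m_UsableSize, the block size
// rounded down by VmaPrevPow2) as a binary tree: level 0 is one node covering
// the whole region and each deeper level halves the node size. Splitting a node
// produces two "buddies"; free nodes are kept in one doubly-linked list per
// level (m_FreeList), so allocating searches these lists and freeing can merge a
// node with its buddy back into their parent.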
10552 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10553  VmaBlockMetadata(hAllocator),
10554  m_Root(VMA_NULL),
10555  m_AllocationCount(0),
10556  m_FreeCount(1),
10557  m_SumFreeSize(0)
10558 {
10559  memset(m_FreeList, 0, sizeof(m_FreeList));
10560 }
10561 
10562 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10563 {
10564  DeleteNode(m_Root);
10565 }
10566 
10567 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10568 {
10569  VmaBlockMetadata::Init(size);
10570 
10571  m_UsableSize = VmaPrevPow2(size);
10572  m_SumFreeSize = m_UsableSize;
10573 
10574  // Calculate m_LevelCount.
10575  m_LevelCount = 1;
10576  while(m_LevelCount < MAX_LEVELS &&
10577  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10578  {
10579  ++m_LevelCount;
10580  }
10581 
10582  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10583  rootNode->offset = 0;
10584  rootNode->type = Node::TYPE_FREE;
10585  rootNode->parent = VMA_NULL;
10586  rootNode->buddy = VMA_NULL;
10587 
10588  m_Root = rootNode;
10589  AddToFreeListFront(0, rootNode);
10590 }
10591 
10592 bool VmaBlockMetadata_Buddy::Validate() const
10593 {
10594  // Validate tree.
10595  ValidationContext ctx;
10596  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10597  {
10598  VMA_VALIDATE(false && "ValidateNode failed.");
10599  }
10600  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10601  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10602 
10603  // Validate free node lists.
10604  for(uint32_t level = 0; level < m_LevelCount; ++level)
10605  {
10606  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10607  m_FreeList[level].front->free.prev == VMA_NULL);
10608 
10609  for(Node* node = m_FreeList[level].front;
10610  node != VMA_NULL;
10611  node = node->free.next)
10612  {
10613  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10614 
10615  if(node->free.next == VMA_NULL)
10616  {
10617  VMA_VALIDATE(m_FreeList[level].back == node);
10618  }
10619  else
10620  {
10621  VMA_VALIDATE(node->free.next->free.prev == node);
10622  }
10623  }
10624  }
10625 
10626  // Validate that free lists at higher levels are empty.
10627  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10628  {
10629  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10630  }
10631 
10632  return true;
10633 }
10634 
10635 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10636 {
10637  for(uint32_t level = 0; level < m_LevelCount; ++level)
10638  {
10639  if(m_FreeList[level].front != VMA_NULL)
10640  {
10641  return LevelToNodeSize(level);
10642  }
10643  }
10644  return 0;
10645 }
10646 
10647 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10648 {
10649  const VkDeviceSize unusableSize = GetUnusableSize();
10650 
10651  outInfo.blockCount = 1;
10652 
10653  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10654  outInfo.usedBytes = outInfo.unusedBytes = 0;
10655 
10656  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10657  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10658  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10659 
10660  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10661 
10662  if(unusableSize > 0)
10663  {
10664  ++outInfo.unusedRangeCount;
10665  outInfo.unusedBytes += unusableSize;
10666  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10667  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10668  }
10669 }
10670 
10671 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10672 {
10673  const VkDeviceSize unusableSize = GetUnusableSize();
10674 
10675  inoutStats.size += GetSize();
10676  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10677  inoutStats.allocationCount += m_AllocationCount;
10678  inoutStats.unusedRangeCount += m_FreeCount;
10679  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10680 
10681  if(unusableSize > 0)
10682  {
10683  ++inoutStats.unusedRangeCount;
10684  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10685  }
10686 }
10687 
10688 #if VMA_STATS_STRING_ENABLED
10689 
10690 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10691 {
10692  // TODO optimize
10693  VmaStatInfo stat;
10694  CalcAllocationStatInfo(stat);
10695 
10696  PrintDetailedMap_Begin(
10697  json,
10698  stat.unusedBytes,
10699  stat.allocationCount,
10700  stat.unusedRangeCount);
10701 
10702  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10703 
10704  const VkDeviceSize unusableSize = GetUnusableSize();
10705  if(unusableSize > 0)
10706  {
10707  PrintDetailedMap_UnusedRange(json,
10708  m_UsableSize, // offset
10709  unusableSize); // size
10710  }
10711 
10712  PrintDetailedMap_End(json);
10713 }
10714 
10715 #endif // #if VMA_STATS_STRING_ENABLED
10716 
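// CreateAllocationRequest first rounds potentially-OPTIMAL-image requests up to
// bufferImageGranularity (a simple, conservative way to respect it), then scans
// the free lists from the best-fitting level toward the root and returns the
// first free node whose offset satisfies the requested alignment, remembering
// the level in customData for Alloc.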
10717 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10718  uint32_t currentFrameIndex,
10719  uint32_t frameInUseCount,
10720  VkDeviceSize bufferImageGranularity,
10721  VkDeviceSize allocSize,
10722  VkDeviceSize allocAlignment,
10723  bool upperAddress,
10724  VmaSuballocationType allocType,
10725  bool canMakeOtherLost,
10726  uint32_t strategy,
10727  VmaAllocationRequest* pAllocationRequest)
10728 {
10729  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10730 
10731  // Simple way to respect bufferImageGranularity. May be optimized some day.
10732  // Whenever it might be an OPTIMAL image...
10733  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10734  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10735  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10736  {
10737  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10738  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10739  }
10740 
10741  if(allocSize > m_UsableSize)
10742  {
10743  return false;
10744  }
10745 
10746  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10747  for(uint32_t level = targetLevel + 1; level--; )
10748  {
10749  for(Node* freeNode = m_FreeList[level].front;
10750  freeNode != VMA_NULL;
10751  freeNode = freeNode->free.next)
10752  {
10753  if(freeNode->offset % allocAlignment == 0)
10754  {
10755  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10756  pAllocationRequest->offset = freeNode->offset;
10757  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10758  pAllocationRequest->sumItemSize = 0;
10759  pAllocationRequest->itemsToMakeLostCount = 0;
10760  pAllocationRequest->customData = (void*)(uintptr_t)level;
10761  return true;
10762  }
10763  }
10764  }
10765 
10766  return false;
10767 }
10768 
10769 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10770  uint32_t currentFrameIndex,
10771  uint32_t frameInUseCount,
10772  VmaAllocationRequest* pAllocationRequest)
10773 {
10774  /*
10775  Lost allocations are not supported in buddy allocator at the moment.
10776  Support might be added in the future.
10777  */
10778  return pAllocationRequest->itemsToMakeLostCount == 0;
10779 }
10780 
10781 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10782 {
10783  /*
10784  Lost allocations are not supported in buddy allocator at the moment.
10785  Support might be added in the future.
10786  */
10787  return 0;
10788 }
10789 
10790 void VmaBlockMetadata_Buddy::Alloc(
10791  const VmaAllocationRequest& request,
10792  VmaSuballocationType type,
10793  VkDeviceSize allocSize,
10794  VmaAllocation hAllocation)
10795 {
10796  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10797 
10798  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10799  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10800 
10801  Node* currNode = m_FreeList[currLevel].front;
10802  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10803  while(currNode->offset != request.offset)
10804  {
10805  currNode = currNode->free.next;
10806  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10807  }
10808 
10809  // Go down, splitting free nodes.
10810  while(currLevel < targetLevel)
10811  {
10812  // currNode is already first free node at currLevel.
10813  // Remove it from list of free nodes at this currLevel.
10814  RemoveFromFreeList(currLevel, currNode);
10815 
10816  const uint32_t childrenLevel = currLevel + 1;
10817 
10818  // Create two free sub-nodes.
10819  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10820  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10821 
10822  leftChild->offset = currNode->offset;
10823  leftChild->type = Node::TYPE_FREE;
10824  leftChild->parent = currNode;
10825  leftChild->buddy = rightChild;
10826 
10827  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10828  rightChild->type = Node::TYPE_FREE;
10829  rightChild->parent = currNode;
10830  rightChild->buddy = leftChild;
10831 
10832  // Convert current currNode to split type.
10833  currNode->type = Node::TYPE_SPLIT;
10834  currNode->split.leftChild = leftChild;
10835 
10836  // Add child nodes to free list. Order is important!
10837  AddToFreeListFront(childrenLevel, rightChild);
10838  AddToFreeListFront(childrenLevel, leftChild);
10839 
10840  ++m_FreeCount;
10841  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10842  ++currLevel;
10843  currNode = m_FreeList[currLevel].front;
10844 
10845  /*
10846  We can be sure that currNode, as the left child of the node previously split,
10847  also fulfills the alignment requirement.
10848  */
10849  }
10850 
10851  // Remove from free list.
10852  VMA_ASSERT(currLevel == targetLevel &&
10853  currNode != VMA_NULL &&
10854  currNode->type == Node::TYPE_FREE);
10855  RemoveFromFreeList(currLevel, currNode);
10856 
10857  // Convert to allocation node.
10858  currNode->type = Node::TYPE_ALLOCATION;
10859  currNode->allocation.alloc = hAllocation;
10860 
10861  ++m_AllocationCount;
10862  --m_FreeCount;
10863  m_SumFreeSize -= allocSize;
10864 }
10865 
10866 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10867 {
10868  if(node->type == Node::TYPE_SPLIT)
10869  {
10870  DeleteNode(node->split.leftChild->buddy);
10871  DeleteNode(node->split.leftChild);
10872  }
10873 
10874  vma_delete(GetAllocationCallbacks(), node);
10875 }
10876 
10877 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10878 {
10879  VMA_VALIDATE(level < m_LevelCount);
10880  VMA_VALIDATE(curr->parent == parent);
10881  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10882  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10883  switch(curr->type)
10884  {
10885  case Node::TYPE_FREE:
10886  // curr->free.prev, next are validated separately.
10887  ctx.calculatedSumFreeSize += levelNodeSize;
10888  ++ctx.calculatedFreeCount;
10889  break;
10890  case Node::TYPE_ALLOCATION:
10891  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10892  ++ctx.calculatedAllocationCount;
10893  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10894  break;
10895  case Node::TYPE_SPLIT:
10896  {
10897  const uint32_t childrenLevel = level + 1;
10898  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10899  const Node* const leftChild = curr->split.leftChild;
10900  VMA_VALIDATE(leftChild != VMA_NULL);
10901  VMA_VALIDATE(leftChild->offset == curr->offset);
10902  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10903  {
10904  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10905  }
10906  const Node* const rightChild = leftChild->buddy;
10907  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10908  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10909  {
10910  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10911  }
10912  }
10913  break;
10914  default:
10915  return false;
10916  }
10917 
10918  return true;
10919 }
10920 
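// Example: with m_UsableSize = 256 MB, an allocSize of 10 MB maps to level 4
// (16 MB nodes), because the 8 MB nodes of the next level are already too small.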
10921 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10922 {
10923  // I know this could be optimized somehow, e.g. by using std::bit_width (called std::log2p1 in earlier drafts) from C++20.
10924  uint32_t level = 0;
10925  VkDeviceSize currLevelNodeSize = m_UsableSize;
10926  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10927  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10928  {
10929  ++level;
10930  currLevelNodeSize = nextLevelNodeSize;
10931  nextLevelNodeSize = currLevelNodeSize >> 1;
10932  }
10933  return level;
10934 }
10935 
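// FreeAtOffset descends from the root, at each split choosing the child whose
// half of the range contains the offset, until it reaches the allocation node.
// After marking it free it repeatedly merges the node with its buddy while the
// buddy is free too, replacing two children with their parent on each step.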
10936 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10937 {
10938  // Find node and level.
10939  Node* node = m_Root;
10940  VkDeviceSize nodeOffset = 0;
10941  uint32_t level = 0;
10942  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10943  while(node->type == Node::TYPE_SPLIT)
10944  {
10945  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10946  if(offset < nodeOffset + nextLevelSize)
10947  {
10948  node = node->split.leftChild;
10949  }
10950  else
10951  {
10952  node = node->split.leftChild->buddy;
10953  nodeOffset += nextLevelSize;
10954  }
10955  ++level;
10956  levelNodeSize = nextLevelSize;
10957  }
10958 
10959  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10960  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10961 
10962  ++m_FreeCount;
10963  --m_AllocationCount;
10964  m_SumFreeSize += node->allocation.alloc->GetSize(); // The alloc parameter may be VK_NULL_HANDLE, so use the handle stored in the node.
10965 
10966  node->type = Node::TYPE_FREE;
10967 
10968  // Join free nodes if possible.
10969  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10970  {
10971  RemoveFromFreeList(level, node->buddy);
10972  Node* const parent = node->parent;
10973 
10974  vma_delete(GetAllocationCallbacks(), node->buddy);
10975  vma_delete(GetAllocationCallbacks(), node);
10976  parent->type = Node::TYPE_FREE;
10977 
10978  node = parent;
10979  --level;
10980  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10981  --m_FreeCount;
10982  }
10983 
10984  AddToFreeListFront(level, node);
10985 }
10986 
10987 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10988 {
10989  switch(node->type)
10990  {
10991  case Node::TYPE_FREE:
10992  ++outInfo.unusedRangeCount;
10993  outInfo.unusedBytes += levelNodeSize;
10994  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10995  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10996  break;
10997  case Node::TYPE_ALLOCATION:
10998  {
10999  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11000  ++outInfo.allocationCount;
11001  outInfo.usedBytes += allocSize;
11002  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11003  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11004 
11005  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11006  if(unusedRangeSize > 0)
11007  {
11008  ++outInfo.unusedRangeCount;
11009  outInfo.unusedBytes += unusedRangeSize;
11010  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11011  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11012  }
11013  }
11014  break;
11015  case Node::TYPE_SPLIT:
11016  {
11017  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11018  const Node* const leftChild = node->split.leftChild;
11019  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11020  const Node* const rightChild = leftChild->buddy;
11021  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11022  }
11023  break;
11024  default:
11025  VMA_ASSERT(0);
11026  }
11027 }
11028 
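// The per-level free lists are intrusive doubly-linked lists threaded through
// Node::free.prev/next. Both front and back pointers are kept so insertion at
// the front and removal from an arbitrary position are O(1).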
11029 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11030 {
11031  VMA_ASSERT(node->type == Node::TYPE_FREE);
11032 
11033  // List is empty.
11034  Node* const frontNode = m_FreeList[level].front;
11035  if(frontNode == VMA_NULL)
11036  {
11037  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11038  node->free.prev = node->free.next = VMA_NULL;
11039  m_FreeList[level].front = m_FreeList[level].back = node;
11040  }
11041  else
11042  {
11043  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11044  node->free.prev = VMA_NULL;
11045  node->free.next = frontNode;
11046  frontNode->free.prev = node;
11047  m_FreeList[level].front = node;
11048  }
11049 }
11050 
11051 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11052 {
11053  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11054 
11055  // It is at the front.
11056  if(node->free.prev == VMA_NULL)
11057  {
11058  VMA_ASSERT(m_FreeList[level].front == node);
11059  m_FreeList[level].front = node->free.next;
11060  }
11061  else
11062  {
11063  Node* const prevFreeNode = node->free.prev;
11064  VMA_ASSERT(prevFreeNode->free.next == node);
11065  prevFreeNode->free.next = node->free.next;
11066  }
11067 
11068  // It is at the back.
11069  if(node->free.next == VMA_NULL)
11070  {
11071  VMA_ASSERT(m_FreeList[level].back == node);
11072  m_FreeList[level].back = node->free.prev;
11073  }
11074  else
11075  {
11076  Node* const nextFreeNode = node->free.next;
11077  VMA_ASSERT(nextFreeNode->free.prev == node);
11078  nextFreeNode->free.prev = node->free.prev;
11079  }
11080 }
11081 
11082 #if VMA_STATS_STRING_ENABLED
11083 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11084 {
11085  switch(node->type)
11086  {
11087  case Node::TYPE_FREE:
11088  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11089  break;
11090  case Node::TYPE_ALLOCATION:
11091  {
11092  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11093  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11094  if(allocSize < levelNodeSize)
11095  {
11096  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11097  }
11098  }
11099  break;
11100  case Node::TYPE_SPLIT:
11101  {
11102  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11103  const Node* const leftChild = node->split.leftChild;
11104  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11105  const Node* const rightChild = leftChild->buddy;
11106  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11107  }
11108  break;
11109  default:
11110  VMA_ASSERT(0);
11111  }
11112 }
11113 #endif // #if VMA_STATS_STRING_ENABLED
11114 
11115 
11116 ////////////////////////////////////////////////////////////////////////////////
11117 // class VmaDeviceMemoryBlock
11118 
11119 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11120  m_pMetadata(VMA_NULL),
11121  m_MemoryTypeIndex(UINT32_MAX),
11122  m_Id(0),
11123  m_hMemory(VK_NULL_HANDLE),
11124  m_MapCount(0),
11125  m_pMappedData(VMA_NULL)
11126 {
11127 }
11128 
11129 void VmaDeviceMemoryBlock::Init(
11130  VmaAllocator hAllocator,
11131  VmaPool hParentPool,
11132  uint32_t newMemoryTypeIndex,
11133  VkDeviceMemory newMemory,
11134  VkDeviceSize newSize,
11135  uint32_t id,
11136  uint32_t algorithm)
11137 {
11138  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11139 
11140  m_hParentPool = hParentPool;
11141  m_MemoryTypeIndex = newMemoryTypeIndex;
11142  m_Id = id;
11143  m_hMemory = newMemory;
11144 
11145  switch(algorithm)
11146  {
11147  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11148  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11149  break;
11150  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11151  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11152  break;
11153  default:
11154  VMA_ASSERT(0);
11155  // Fall-through.
11156  case 0:
11157  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11158  }
11159  m_pMetadata->Init(newSize);
11160 }
11161 
11162 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11163 {
11164  // This is the most important assert in the entire library.
11165  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11166  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11167 
11168  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11169  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11170  m_hMemory = VK_NULL_HANDLE;
11171 
11172  vma_delete(allocator, m_pMetadata);
11173  m_pMetadata = VMA_NULL;
11174 }
11175 
11176 bool VmaDeviceMemoryBlock::Validate() const
11177 {
11178  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11179  (m_pMetadata->GetSize() != 0));
11180 
11181  return m_pMetadata->Validate();
11182 }
11183 
11184 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11185 {
11186  void* pData = nullptr;
11187  VkResult res = Map(hAllocator, 1, &pData);
11188  if(res != VK_SUCCESS)
11189  {
11190  return res;
11191  }
11192 
11193  res = m_pMetadata->CheckCorruption(pData);
11194 
11195  Unmap(hAllocator, 1);
11196 
11197  return res;
11198 }
11199 
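// Map/Unmap implement reference-counted persistent mapping: the first Map call
// performs the actual vkMapMemory of the whole block, subsequent calls only
// increase m_MapCount and return the cached pointer, and vkUnmapMemory runs
// only when the count drops back to zero.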
11200 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11201 {
11202  if(count == 0)
11203  {
11204  return VK_SUCCESS;
11205  }
11206 
11207  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11208  if(m_MapCount != 0)
11209  {
11210  m_MapCount += count;
11211  VMA_ASSERT(m_pMappedData != VMA_NULL);
11212  if(ppData != VMA_NULL)
11213  {
11214  *ppData = m_pMappedData;
11215  }
11216  return VK_SUCCESS;
11217  }
11218  else
11219  {
11220  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11221  hAllocator->m_hDevice,
11222  m_hMemory,
11223  0, // offset
11224  VK_WHOLE_SIZE,
11225  0, // flags
11226  &m_pMappedData);
11227  if(result == VK_SUCCESS)
11228  {
11229  if(ppData != VMA_NULL)
11230  {
11231  *ppData = m_pMappedData;
11232  }
11233  m_MapCount = count;
11234  }
11235  return result;
11236  }
11237 }
11238 
11239 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11240 {
11241  if(count == 0)
11242  {
11243  return;
11244  }
11245 
11246  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11247  if(m_MapCount >= count)
11248  {
11249  m_MapCount -= count;
11250  if(m_MapCount == 0)
11251  {
11252  m_pMappedData = VMA_NULL;
11253  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11254  }
11255  }
11256  else
11257  {
11258  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11259  }
11260 }
11261 
11262 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11263 {
11264  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11265  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11266 
11267  void* pData;
11268  VkResult res = Map(hAllocator, 1, &pData);
11269  if(res != VK_SUCCESS)
11270  {
11271  return res;
11272  }
11273 
11274  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11275  VmaWriteMagicValue(pData, allocOffset + allocSize);
11276 
11277  Unmap(hAllocator, 1);
11278 
11279  return VK_SUCCESS;
11280 }
11281 
11282 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11283 {
11284  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11285  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11286 
11287  void* pData;
11288  VkResult res = Map(hAllocator, 1, &pData);
11289  if(res != VK_SUCCESS)
11290  {
11291  return res;
11292  }
11293 
11294  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11295  {
11296  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11297  }
11298  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11299  {
11300  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11301  }
11302 
11303  Unmap(hAllocator, 1);
11304 
11305  return VK_SUCCESS;
11306 }
11307 
11308 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11309  const VmaAllocator hAllocator,
11310  const VmaAllocation hAllocation,
11311  VkBuffer hBuffer)
11312 {
11313  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11314  hAllocation->GetBlock() == this);
11315  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11316  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11317  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11318  hAllocator->m_hDevice,
11319  hBuffer,
11320  m_hMemory,
11321  hAllocation->GetOffset());
11322 }
11323 
11324 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11325  const VmaAllocator hAllocator,
11326  const VmaAllocation hAllocation,
11327  VkImage hImage)
11328 {
11329  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11330  hAllocation->GetBlock() == this);
11331  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11332  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11333  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11334  hAllocator->m_hDevice,
11335  hImage,
11336  m_hMemory,
11337  hAllocation->GetOffset());
11338 }
11339 
11340 static void InitStatInfo(VmaStatInfo& outInfo)
11341 {
11342  memset(&outInfo, 0, sizeof(outInfo));
11343  outInfo.allocationSizeMin = UINT64_MAX;
11344  outInfo.unusedRangeSizeMin = UINT64_MAX;
11345 }
11346 
11347 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11348 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11349 {
11350  inoutInfo.blockCount += srcInfo.blockCount;
11351  inoutInfo.allocationCount += srcInfo.allocationCount;
11352  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11353  inoutInfo.usedBytes += srcInfo.usedBytes;
11354  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11355  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11356  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11357  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11358  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11359 }
11360 
11361 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11362 {
11363  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11364  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11365  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11366  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11367 }
11368 
11369 VmaPool_T::VmaPool_T(
11370  VmaAllocator hAllocator,
11371  const VmaPoolCreateInfo& createInfo,
11372  VkDeviceSize preferredBlockSize) :
11373  m_BlockVector(
11374  hAllocator,
11375  this, // hParentPool
11376  createInfo.memoryTypeIndex,
11377  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11378  createInfo.minBlockCount,
11379  createInfo.maxBlockCount,
11380  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11381  createInfo.frameInUseCount,
11382  true, // isCustomPool
11383  createInfo.blockSize != 0, // explicitBlockSize
11384  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11385  m_Id(0)
11386 {
11387 }
11388 
11389 VmaPool_T::~VmaPool_T()
11390 {
11391 }
11392 
11393 #if VMA_STATS_STRING_ENABLED
11394 
11395 #endif // #if VMA_STATS_STRING_ENABLED
11396 
11397 VmaBlockVector::VmaBlockVector(
11398  VmaAllocator hAllocator,
11399  VmaPool hParentPool,
11400  uint32_t memoryTypeIndex,
11401  VkDeviceSize preferredBlockSize,
11402  size_t minBlockCount,
11403  size_t maxBlockCount,
11404  VkDeviceSize bufferImageGranularity,
11405  uint32_t frameInUseCount,
11406  bool isCustomPool,
11407  bool explicitBlockSize,
11408  uint32_t algorithm) :
11409  m_hAllocator(hAllocator),
11410  m_hParentPool(hParentPool),
11411  m_MemoryTypeIndex(memoryTypeIndex),
11412  m_PreferredBlockSize(preferredBlockSize),
11413  m_MinBlockCount(minBlockCount),
11414  m_MaxBlockCount(maxBlockCount),
11415  m_BufferImageGranularity(bufferImageGranularity),
11416  m_FrameInUseCount(frameInUseCount),
11417  m_IsCustomPool(isCustomPool),
11418  m_ExplicitBlockSize(explicitBlockSize),
11419  m_Algorithm(algorithm),
11420  m_HasEmptyBlock(false),
11421  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11422  m_NextBlockId(0)
11423 {
11424 }
11425 
11426 VmaBlockVector::~VmaBlockVector()
11427 {
11428  for(size_t i = m_Blocks.size(); i--; )
11429  {
11430  m_Blocks[i]->Destroy(m_hAllocator);
11431  vma_delete(m_hAllocator, m_Blocks[i]);
11432  }
11433 }
11434 
11435 VkResult VmaBlockVector::CreateMinBlocks()
11436 {
11437  for(size_t i = 0; i < m_MinBlockCount; ++i)
11438  {
11439  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11440  if(res != VK_SUCCESS)
11441  {
11442  return res;
11443  }
11444  }
11445  return VK_SUCCESS;
11446 }
11447 
11448 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11449 {
11450  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11451 
11452  const size_t blockCount = m_Blocks.size();
11453 
11454  pStats->size = 0;
11455  pStats->unusedSize = 0;
11456  pStats->allocationCount = 0;
11457  pStats->unusedRangeCount = 0;
11458  pStats->unusedRangeSizeMax = 0;
11459  pStats->blockCount = blockCount;
11460 
11461  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11462  {
11463  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11464  VMA_ASSERT(pBlock);
11465  VMA_HEAVY_ASSERT(pBlock->Validate());
11466  pBlock->m_pMetadata->AddPoolStats(*pStats);
11467  }
11468 }
11469 
11470 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11471 {
11472  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11473  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11474  (VMA_DEBUG_MARGIN > 0) &&
11475  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11476  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11477 }
11478 
11479 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11480 
11481 VkResult VmaBlockVector::Allocate(
11482  uint32_t currentFrameIndex,
11483  VkDeviceSize size,
11484  VkDeviceSize alignment,
11485  const VmaAllocationCreateInfo& createInfo,
11486  VmaSuballocationType suballocType,
11487  size_t allocationCount,
11488  VmaAllocation* pAllocations)
11489 {
11490  size_t allocIndex;
11491  VkResult res = VK_SUCCESS;
11492 
11493  if(IsCorruptionDetectionEnabled())
11494  {
11495  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11496  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11497  }
11498 
11499  {
11500  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11501  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11502  {
11503  res = AllocatePage(
11504  currentFrameIndex,
11505  size,
11506  alignment,
11507  createInfo,
11508  suballocType,
11509  pAllocations + allocIndex);
11510  if(res != VK_SUCCESS)
11511  {
11512  break;
11513  }
11514  }
11515  }
11516 
11517  if(res != VK_SUCCESS)
11518  {
11519  // Free all already created allocations.
11520  while(allocIndex--)
11521  {
11522  Free(pAllocations[allocIndex]);
11523  }
11524  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11525  }
11526 
11527  return res;
11528 }
11529 
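// AllocatePage performs the full strategy for a single allocation:
// 1. try existing blocks (only the last one for the linear algorithm; forward
//    order for BEST_FIT, backward for WORST_FIT/FIRST_FIT),
// 2. create a new block, retrying with 1/2, 1/4, 1/8 of the preferred size if
//    creation fails,
// 3. if allowed, make other lost-enabled allocations lost, choosing the
//    candidate block with the lowest CalcCost() over a bounded number of tries.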
11530 VkResult VmaBlockVector::AllocatePage(
11531  uint32_t currentFrameIndex,
11532  VkDeviceSize size,
11533  VkDeviceSize alignment,
11534  const VmaAllocationCreateInfo& createInfo,
11535  VmaSuballocationType suballocType,
11536  VmaAllocation* pAllocation)
11537 {
11538  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11539  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11540  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11541  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11542  const bool canCreateNewBlock =
11543  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11544  (m_Blocks.size() < m_MaxBlockCount);
11545  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11546 
11547  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11548  // Which in turn is available only when maxBlockCount = 1.
11549  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11550  {
11551  canMakeOtherLost = false;
11552  }
11553 
11554  // Upper address can only be used with linear allocator and within single memory block.
11555  if(isUpperAddress &&
11556  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11557  {
11558  return VK_ERROR_FEATURE_NOT_PRESENT;
11559  }
11560 
11561  // Validate strategy.
11562  switch(strategy)
11563  {
11564  case 0:
11565  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11566  break;
11567  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11568  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11570  break;
11571  default:
11572  return VK_ERROR_FEATURE_NOT_PRESENT;
11573  }
11574 
11575  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11576  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11577  {
11578  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11579  }
11580 
11581  /*
11582  Under certain conditions, this whole section can be skipped for optimization, so
11583  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11584  e.g. for custom pools with linear algorithm.
11585  */
11586  if(!canMakeOtherLost || canCreateNewBlock)
11587  {
11588  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11589  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11590  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11591 
11592  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11593  {
11594  // Use only last block.
11595  if(!m_Blocks.empty())
11596  {
11597  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11598  VMA_ASSERT(pCurrBlock);
11599  VkResult res = AllocateFromBlock(
11600  pCurrBlock,
11601  currentFrameIndex,
11602  size,
11603  alignment,
11604  allocFlagsCopy,
11605  createInfo.pUserData,
11606  suballocType,
11607  strategy,
11608  pAllocation);
11609  if(res == VK_SUCCESS)
11610  {
11611  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11612  return VK_SUCCESS;
11613  }
11614  }
11615  }
11616  else
11617  {
11618  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11619  {
11620  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11621  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11622  {
11623  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11624  VMA_ASSERT(pCurrBlock);
11625  VkResult res = AllocateFromBlock(
11626  pCurrBlock,
11627  currentFrameIndex,
11628  size,
11629  alignment,
11630  allocFlagsCopy,
11631  createInfo.pUserData,
11632  suballocType,
11633  strategy,
11634  pAllocation);
11635  if(res == VK_SUCCESS)
11636  {
11637  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11638  return VK_SUCCESS;
11639  }
11640  }
11641  }
11642  else // WORST_FIT, FIRST_FIT
11643  {
11644  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11645  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11646  {
11647  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11648  VMA_ASSERT(pCurrBlock);
11649  VkResult res = AllocateFromBlock(
11650  pCurrBlock,
11651  currentFrameIndex,
11652  size,
11653  alignment,
11654  allocFlagsCopy,
11655  createInfo.pUserData,
11656  suballocType,
11657  strategy,
11658  pAllocation);
11659  if(res == VK_SUCCESS)
11660  {
11661  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11662  return VK_SUCCESS;
11663  }
11664  }
11665  }
11666  }
11667 
11668  // 2. Try to create new block.
11669  if(canCreateNewBlock)
11670  {
11671  // Calculate optimal size for new block.
11672  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11673  uint32_t newBlockSizeShift = 0;
11674  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11675 
11676  if(!m_ExplicitBlockSize)
11677  {
11678  // Allocate 1/8, 1/4, 1/2 as first blocks.
11679  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11680  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11681  {
11682  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11683  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11684  {
11685  newBlockSize = smallerNewBlockSize;
11686  ++newBlockSizeShift;
11687  }
11688  else
11689  {
11690  break;
11691  }
11692  }
11693  }
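// Worked example (illustrative): with m_PreferredBlockSize = 256 MiB, no existing
// blocks (maxExistingBlockSize = 0), and a 4 MiB request, the loop above halves the
// candidate size three times - 128 MiB, 64 MiB, 32 MiB - because each half is still
// larger than any existing block and at least twice the requested size. The first
// block created is therefore 32 MiB, i.e. 1/8 of the preferred size.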
11694 
11695  size_t newBlockIndex = 0;
11696  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11697  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11698  if(!m_ExplicitBlockSize)
11699  {
11700  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11701  {
11702  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11703  if(smallerNewBlockSize >= size)
11704  {
11705  newBlockSize = smallerNewBlockSize;
11706  ++newBlockSizeShift;
11707  res = CreateBlock(newBlockSize, &newBlockIndex);
11708  }
11709  else
11710  {
11711  break;
11712  }
11713  }
11714  }
11715 
11716  if(res == VK_SUCCESS)
11717  {
11718  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11719  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11720 
11721  res = AllocateFromBlock(
11722  pBlock,
11723  currentFrameIndex,
11724  size,
11725  alignment,
11726  allocFlagsCopy,
11727  createInfo.pUserData,
11728  suballocType,
11729  strategy,
11730  pAllocation);
11731  if(res == VK_SUCCESS)
11732  {
11733  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11734  return VK_SUCCESS;
11735  }
11736  else
11737  {
11738  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11739  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11740  }
11741  }
11742  }
11743  }
11744 
11745  // 3. Try to allocate from existing blocks with making other allocations lost.
11746  if(canMakeOtherLost)
11747  {
11748  uint32_t tryIndex = 0;
11749  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11750  {
11751  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11752  VmaAllocationRequest bestRequest = {};
11753  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11754 
11755  // 1. Search existing allocations.
11756  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11757  {
11758  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11759  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11760  {
11761  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11762  VMA_ASSERT(pCurrBlock);
11763  VmaAllocationRequest currRequest = {};
11764  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11765  currentFrameIndex,
11766  m_FrameInUseCount,
11767  m_BufferImageGranularity,
11768  size,
11769  alignment,
11770  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11771  suballocType,
11772  canMakeOtherLost,
11773  strategy,
11774  &currRequest))
11775  {
11776  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11777  if(pBestRequestBlock == VMA_NULL ||
11778  currRequestCost < bestRequestCost)
11779  {
11780  pBestRequestBlock = pCurrBlock;
11781  bestRequest = currRequest;
11782  bestRequestCost = currRequestCost;
11783 
11784  if(bestRequestCost == 0)
11785  {
11786  break;
11787  }
11788  }
11789  }
11790  }
11791  }
11792  else // WORST_FIT, FIRST_FIT
11793  {
11794  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11795  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11796  {
11797  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11798  VMA_ASSERT(pCurrBlock);
11799  VmaAllocationRequest currRequest = {};
11800  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11801  currentFrameIndex,
11802  m_FrameInUseCount,
11803  m_BufferImageGranularity,
11804  size,
11805  alignment,
11806  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11807  suballocType,
11808  canMakeOtherLost,
11809  strategy,
11810  &currRequest))
11811  {
11812  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11813  if(pBestRequestBlock == VMA_NULL ||
11814  currRequestCost < bestRequestCost ||
11816  {
11817  pBestRequestBlock = pCurrBlock;
11818  bestRequest = currRequest;
11819  bestRequestCost = currRequestCost;
11820 
11821  if(bestRequestCost == 0 ||
11822  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11823  {
11824  break;
11825  }
11826  }
11827  }
11828  }
11829  }
11830 
11831  if(pBestRequestBlock != VMA_NULL)
11832  {
11833  if(mapped)
11834  {
11835  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11836  if(res != VK_SUCCESS)
11837  {
11838  return res;
11839  }
11840  }
11841 
11842  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11843  currentFrameIndex,
11844  m_FrameInUseCount,
11845  &bestRequest))
11846  {
11847  // We no longer have an empty Allocation.
11848  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11849  {
11850  m_HasEmptyBlock = false;
11851  }
11852  // Allocate from this pBlock.
11853  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11854  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11855  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11856  (*pAllocation)->InitBlockAllocation(
11857  pBestRequestBlock,
11858  bestRequest.offset,
11859  alignment,
11860  size,
11861  suballocType,
11862  mapped,
11863  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11864  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11865  VMA_DEBUG_LOG(" Returned from existing block");
11866  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11867  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11868  {
11869  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11870  }
11871  if(IsCorruptionDetectionEnabled())
11872  {
11873  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11874  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11875  }
11876  return VK_SUCCESS;
11877  }
11878  // else: Some allocations must have been touched while we are here. Next try.
11879  }
11880  else
11881  {
11882  // Could not find place in any of the blocks - break outer loop.
11883  break;
11884  }
11885  }
11886  /* Maximum number of tries exceeded - a very unlikely event when many other
11887  threads are simultaneously touching allocations, making it impossible to mark
11888  them as lost at the same time as we try to allocate. */
11889  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11890  {
11891  return VK_ERROR_TOO_MANY_OBJECTS;
11892  }
11893  }
11894 
11895  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11896 }
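// A minimal usage sketch of the public entry point that ultimately reaches the
// three-stage logic above (illustrative, not part of the library; assumes an
// initialized VmaAllocator 'allocator' and valid VkMemoryRequirements 'memReq'):
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   VmaAllocation allocation = VK_NULL_HANDLE;
//   VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
//       &allocation, nullptr);
//
// On success, the allocation came from an existing block, a newly created block,
// or (with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) from making other
// allocations lost - exactly the order tried above.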
11897 
11898 void VmaBlockVector::Free(
11899  VmaAllocation hAllocation)
11900 {
11901  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11902 
11903  // Scope for lock.
11904  {
11905  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11906 
11907  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11908 
11909  if(IsCorruptionDetectionEnabled())
11910  {
11911  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11912  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11913  }
11914 
11915  if(hAllocation->IsPersistentMap())
11916  {
11917  pBlock->Unmap(m_hAllocator, 1);
11918  }
11919 
11920  pBlock->m_pMetadata->Free(hAllocation);
11921  VMA_HEAVY_ASSERT(pBlock->Validate());
11922 
11923  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11924 
11925  // pBlock became empty after this deallocation.
11926  if(pBlock->m_pMetadata->IsEmpty())
11927  {
11928  // We already have an empty Allocation. We don't want two, so delete this one.
11929  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11930  {
11931  pBlockToDelete = pBlock;
11932  Remove(pBlock);
11933  }
11934  // We now have our first empty block.
11935  else
11936  {
11937  m_HasEmptyBlock = true;
11938  }
11939  }
11940  // pBlock didn't become empty, but we have another empty block - find and free that one.
11941  // (This is optional, heuristics.)
11942  else if(m_HasEmptyBlock)
11943  {
11944  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11945  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11946  {
11947  pBlockToDelete = pLastBlock;
11948  m_Blocks.pop_back();
11949  m_HasEmptyBlock = false;
11950  }
11951  }
11952 
11953  IncrementallySortBlocks();
11954  }
11955 
11956  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11957  // lock, for performance reasons.
11958  if(pBlockToDelete != VMA_NULL)
11959  {
11960  VMA_DEBUG_LOG(" Deleted empty allocation");
11961  pBlockToDelete->Destroy(m_hAllocator);
11962  vma_delete(m_hAllocator, pBlockToDelete);
11963  }
11964 }
11965 
11966 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11967 {
11968  VkDeviceSize result = 0;
11969  for(size_t i = m_Blocks.size(); i--; )
11970  {
11971  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11972  if(result >= m_PreferredBlockSize)
11973  {
11974  break;
11975  }
11976  }
11977  return result;
11978 }
11979 
11980 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11981 {
11982  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11983  {
11984  if(m_Blocks[blockIndex] == pBlock)
11985  {
11986  VmaVectorRemove(m_Blocks, blockIndex);
11987  return;
11988  }
11989  }
11990  VMA_ASSERT(0);
11991 }
11992 
11993 void VmaBlockVector::IncrementallySortBlocks()
11994 {
11995  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11996  {
11997  // Bubble sort only until first swap.
11998  for(size_t i = 1; i < m_Blocks.size(); ++i)
11999  {
12000  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12001  {
12002  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12003  return;
12004  }
12005  }
12006  }
12007 }
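// Note on the amortized sort above: Free() and AllocatePage() change the free
// size of only one block at a time, so m_Blocks stays nearly sorted by
// GetSumFreeSize(); performing at most one adjacent swap per call converges
// toward sorted order without paying for a full sort on every allocation.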
12008 
12009 VkResult VmaBlockVector::AllocateFromBlock(
12010  VmaDeviceMemoryBlock* pBlock,
12011  uint32_t currentFrameIndex,
12012  VkDeviceSize size,
12013  VkDeviceSize alignment,
12014  VmaAllocationCreateFlags allocFlags,
12015  void* pUserData,
12016  VmaSuballocationType suballocType,
12017  uint32_t strategy,
12018  VmaAllocation* pAllocation)
12019 {
12020  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12021  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12022  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12023  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12024 
12025  VmaAllocationRequest currRequest = {};
12026  if(pBlock->m_pMetadata->CreateAllocationRequest(
12027  currentFrameIndex,
12028  m_FrameInUseCount,
12029  m_BufferImageGranularity,
12030  size,
12031  alignment,
12032  isUpperAddress,
12033  suballocType,
12034  false, // canMakeOtherLost
12035  strategy,
12036  &currRequest))
12037  {
12038  // Allocate from pCurrBlock.
12039  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12040 
12041  if(mapped)
12042  {
12043  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12044  if(res != VK_SUCCESS)
12045  {
12046  return res;
12047  }
12048  }
12049 
12050  // We no longer have an empty Allocation.
12051  if(pBlock->m_pMetadata->IsEmpty())
12052  {
12053  m_HasEmptyBlock = false;
12054  }
12055 
12056  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12057  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12058  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12059  (*pAllocation)->InitBlockAllocation(
12060  pBlock,
12061  currRequest.offset,
12062  alignment,
12063  size,
12064  suballocType,
12065  mapped,
12066  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12067  VMA_HEAVY_ASSERT(pBlock->Validate());
12068  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12069  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12070  {
12071  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12072  }
12073  if(IsCorruptionDetectionEnabled())
12074  {
12075  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12076  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12077  }
12078  return VK_SUCCESS;
12079  }
12080  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12081 }
12082 
12083 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12084 {
12085  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12086  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12087  allocInfo.allocationSize = blockSize;
12088  VkDeviceMemory mem = VK_NULL_HANDLE;
12089  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12090  if(res < 0)
12091  {
12092  return res;
12093  }
12094 
12095  // New VkDeviceMemory successfully created.
12096 
12097  // Create new Allocation for it.
12098  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12099  pBlock->Init(
12100  m_hAllocator,
12101  m_hParentPool,
12102  m_MemoryTypeIndex,
12103  mem,
12104  allocInfo.allocationSize,
12105  m_NextBlockId++,
12106  m_Algorithm);
12107 
12108  m_Blocks.push_back(pBlock);
12109  if(pNewBlockIndex != VMA_NULL)
12110  {
12111  *pNewBlockIndex = m_Blocks.size() - 1;
12112  }
12113 
12114  return VK_SUCCESS;
12115 }
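// Note: each block gets a unique, monotonically increasing id (m_NextBlockId),
// which is what PrintDetailedMap() below uses as the JSON key, so block
// identity survives the reordering done by IncrementallySortBlocks().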
12116 
12117 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12118  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12119  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12120 {
12121  const size_t blockCount = m_Blocks.size();
12122  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12123 
12124  enum BLOCK_FLAG
12125  {
12126  BLOCK_FLAG_USED = 0x00000001,
12127  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12128  };
12129 
12130  struct BlockInfo
12131  {
12132  uint32_t flags;
12133  void* pMappedData;
12134  };
12135  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12136  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12137  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12138 
12139  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12140  const size_t moveCount = moves.size();
12141  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12142  {
12143  const VmaDefragmentationMove& move = moves[moveIndex];
12144  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12145  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12146  }
12147 
12148  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12149 
12150  // Go over all blocks. Get mapped pointer or map if necessary.
12151  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12152  {
12153  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12154  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12155  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12156  {
12157  currBlockInfo.pMappedData = pBlock->GetMappedData();
12158  // It is not originally mapped - map it.
12159  if(currBlockInfo.pMappedData == VMA_NULL)
12160  {
12161  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12162  if(pDefragCtx->res == VK_SUCCESS)
12163  {
12164  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12165  }
12166  }
12167  }
12168  }
12169 
12170  // Go over all moves. Do actual data transfer.
12171  if(pDefragCtx->res == VK_SUCCESS)
12172  {
12173  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12174  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12175 
12176  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12177  {
12178  const VmaDefragmentationMove& move = moves[moveIndex];
12179 
12180  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12181  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12182 
12183  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12184 
12185  // Invalidate source.
12186  if(isNonCoherent)
12187  {
12188  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12189  memRange.memory = pSrcBlock->GetDeviceMemory();
12190  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12191  memRange.size = VMA_MIN(
12192  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12193  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12194  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12195  }
12196 
12197  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12198  memmove(
12199  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12200  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12201  static_cast<size_t>(move.size));
12202 
12203  if(IsCorruptionDetectionEnabled())
12204  {
12205  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12206  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12207  }
12208 
12209  // Flush destination.
12210  if(isNonCoherent)
12211  {
12212  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12213  memRange.memory = pDstBlock->GetDeviceMemory();
12214  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12215  memRange.size = VMA_MIN(
12216  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12217  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12218  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12219  }
12220  }
12221  }
12222 
12223  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12224  // Regardless of pCtx->res == VK_SUCCESS.
12225  for(size_t blockIndex = blockCount; blockIndex--; )
12226  {
12227  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12228  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12229  {
12230  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12231  pBlock->Unmap(m_hAllocator, 1);
12232  }
12233  }
12234 }
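// Worked example of the alignment above (illustrative): with
// nonCoherentAtomSize = 256 and a move of size = 1000 at srcOffset = 1000,
// memRange.offset = VmaAlignDown(1000, 256) = 768 and memRange.size =
// VmaAlignUp(1000 + (1000 - 768), 256) = VmaAlignUp(1232, 256) = 1280,
// clamped to the end of the block - the whole touched range, expanded to atom
// boundaries as vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges
// require for non-coherent memory.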
12235 
12236 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12237  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12238  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12239  VkCommandBuffer commandBuffer)
12240 {
12241  const size_t blockCount = m_Blocks.size();
12242 
12243  pDefragCtx->blockContexts.resize(blockCount);
12244  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12245 
12246  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12247  const size_t moveCount = moves.size();
12248  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12249  {
12250  const VmaDefragmentationMove& move = moves[moveIndex];
12251  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12252  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12253  }
12254 
12255  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12256 
12257  // Go over all blocks. Create and bind buffer for whole block if necessary.
12258  {
12259  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12260  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12261  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12262 
12263  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12264  {
12265  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12266  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12267  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12268  {
12269  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12270  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12271  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12272  if(pDefragCtx->res == VK_SUCCESS)
12273  {
12274  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12275  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12276  }
12277  }
12278  }
12279  }
12280 
12281  // Go over all moves. Post data transfer commands to command buffer.
12282  if(pDefragCtx->res == VK_SUCCESS)
12283  {
12284  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12285  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12286 
12287  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12288  {
12289  const VmaDefragmentationMove& move = moves[moveIndex];
12290 
12291  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12292  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12293 
12294  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12295 
12296  VkBufferCopy region = {
12297  move.srcOffset,
12298  move.dstOffset,
12299  move.size };
12300  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12301  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12302  }
12303  }
12304 
12305  // Save buffers to defrag context for later destruction.
12306  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12307  {
12308  pDefragCtx->res = VK_NOT_READY;
12309  }
12310 }
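// Setting res to VK_NOT_READY here signals the caller that GPU work was
// recorded: the command buffer must be submitted and completed before
// defragmentation is ended, at which point DefragmentationEnd() below
// destroys the temporary buffers saved in pDefragCtx->blockContexts.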
12311 
12312 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12313 {
12314  m_HasEmptyBlock = false;
12315  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12316  {
12317  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12318  if(pBlock->m_pMetadata->IsEmpty())
12319  {
12320  if(m_Blocks.size() > m_MinBlockCount)
12321  {
12322  if(pDefragmentationStats != VMA_NULL)
12323  {
12324  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12325  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12326  }
12327 
12328  VmaVectorRemove(m_Blocks, blockIndex);
12329  pBlock->Destroy(m_hAllocator);
12330  vma_delete(m_hAllocator, pBlock);
12331  }
12332  else
12333  {
12334  m_HasEmptyBlock = true;
12335  }
12336  }
12337  }
12338 }
12339 
12340 #if VMA_STATS_STRING_ENABLED
12341 
12342 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12343 {
12344  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12345 
12346  json.BeginObject();
12347 
12348  if(m_IsCustomPool)
12349  {
12350  json.WriteString("MemoryTypeIndex");
12351  json.WriteNumber(m_MemoryTypeIndex);
12352 
12353  json.WriteString("BlockSize");
12354  json.WriteNumber(m_PreferredBlockSize);
12355 
12356  json.WriteString("BlockCount");
12357  json.BeginObject(true);
12358  if(m_MinBlockCount > 0)
12359  {
12360  json.WriteString("Min");
12361  json.WriteNumber((uint64_t)m_MinBlockCount);
12362  }
12363  if(m_MaxBlockCount < SIZE_MAX)
12364  {
12365  json.WriteString("Max");
12366  json.WriteNumber((uint64_t)m_MaxBlockCount);
12367  }
12368  json.WriteString("Cur");
12369  json.WriteNumber((uint64_t)m_Blocks.size());
12370  json.EndObject();
12371 
12372  if(m_FrameInUseCount > 0)
12373  {
12374  json.WriteString("FrameInUseCount");
12375  json.WriteNumber(m_FrameInUseCount);
12376  }
12377 
12378  if(m_Algorithm != 0)
12379  {
12380  json.WriteString("Algorithm");
12381  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12382  }
12383  }
12384  else
12385  {
12386  json.WriteString("PreferredBlockSize");
12387  json.WriteNumber(m_PreferredBlockSize);
12388  }
12389 
12390  json.WriteString("Blocks");
12391  json.BeginObject();
12392  for(size_t i = 0; i < m_Blocks.size(); ++i)
12393  {
12394  json.BeginString();
12395  json.ContinueString(m_Blocks[i]->GetId());
12396  json.EndString();
12397 
12398  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12399  }
12400  json.EndObject();
12401 
12402  json.EndObject();
12403 }
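// For a custom pool the emitted JSON has roughly this shape (illustrative):
//
//   { "MemoryTypeIndex": 2, "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Cur": 3 },
//     "Blocks": { "0": { ... }, "5": { ... } } }
//
// Keys under "Blocks" are block IDs, not indices, so they remain stable as
// blocks are created, destroyed, and reordered.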
12404 
12405 #endif // #if VMA_STATS_STRING_ENABLED
12406 
12407 void VmaBlockVector::Defragment(
12408  class VmaBlockVectorDefragmentationContext* pCtx,
12409  VmaDefragmentationStats* pStats,
12410  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12411  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12412  VkCommandBuffer commandBuffer)
12413 {
12414  pCtx->res = VK_SUCCESS;
12415 
12416  const VkMemoryPropertyFlags memPropFlags =
12417  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12418  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12419  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12420 
12421  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12422  isHostVisible;
12423  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12424  !IsCorruptionDetectionEnabled();
12425 
12426  // There are options to defragment this memory type.
12427  if(canDefragmentOnCpu || canDefragmentOnGpu)
12428  {
12429  bool defragmentOnGpu;
12430  // There is only one option to defragment this memory type.
12431  if(canDefragmentOnGpu != canDefragmentOnCpu)
12432  {
12433  defragmentOnGpu = canDefragmentOnGpu;
12434  }
12435  // Both options are available: Heuristics to choose the best one.
12436  else
12437  {
12438  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12439  m_hAllocator->IsIntegratedGpu();
12440  }
12441 
12442  bool overlappingMoveSupported = !defragmentOnGpu;
12443 
12444  if(m_hAllocator->m_UseMutex)
12445  {
12446  m_Mutex.LockWrite();
12447  pCtx->mutexLocked = true;
12448  }
12449 
12450  pCtx->Begin(overlappingMoveSupported);
12451 
12452  // Defragment.
12453 
12454  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12455  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12456  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12457  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12458  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12459 
12460  // Accumulate statistics.
12461  if(pStats != VMA_NULL)
12462  {
12463  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12464  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12465  pStats->bytesMoved += bytesMoved;
12466  pStats->allocationsMoved += allocationsMoved;
12467  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12468  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12469  if(defragmentOnGpu)
12470  {
12471  maxGpuBytesToMove -= bytesMoved;
12472  maxGpuAllocationsToMove -= allocationsMoved;
12473  }
12474  else
12475  {
12476  maxCpuBytesToMove -= bytesMoved;
12477  maxCpuAllocationsToMove -= allocationsMoved;
12478  }
12479  }
12480 
12481  if(pCtx->res >= VK_SUCCESS)
12482  {
12483  if(defragmentOnGpu)
12484  {
12485  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12486  }
12487  else
12488  {
12489  ApplyDefragmentationMovesCpu(pCtx, moves);
12490  }
12491  }
12492  }
12493 }
12494 
12495 void VmaBlockVector::DefragmentationEnd(
12496  class VmaBlockVectorDefragmentationContext* pCtx,
12497  VmaDefragmentationStats* pStats)
12498 {
12499  // Destroy buffers.
12500  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12501  {
12502  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12503  if(blockCtx.hBuffer)
12504  {
12505  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12506  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12507  }
12508  }
12509 
12510  if(pCtx->res >= VK_SUCCESS)
12511  {
12512  FreeEmptyBlocks(pStats);
12513  }
12514 
12515  if(pCtx->mutexLocked)
12516  {
12517  VMA_ASSERT(m_hAllocator->m_UseMutex);
12518  m_Mutex.UnlockWrite();
12519  }
12520 }
12521 
12522 size_t VmaBlockVector::CalcAllocationCount() const
12523 {
12524  size_t result = 0;
12525  for(size_t i = 0; i < m_Blocks.size(); ++i)
12526  {
12527  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12528  }
12529  return result;
12530 }
12531 
12532 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12533 {
12534  if(m_BufferImageGranularity == 1)
12535  {
12536  return false;
12537  }
12538  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12539  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12540  {
12541  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12542  VMA_ASSERT(m_Algorithm == 0);
12543  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12544  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12545  {
12546  return true;
12547  }
12548  }
12549  return false;
12550 }
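// Example of the conflict checked above (illustrative): with
// bufferImageGranularity = 65536, a buffer suballocation followed by an
// OPTIMAL-tiling image suballocation on the same 64 KiB page would alias in a
// way the Vulkan spec forbids, so such a block vector cannot safely use the
// fast defragmentation path that packs suballocations tightly together.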
12551 
12552 void VmaBlockVector::MakePoolAllocationsLost(
12553  uint32_t currentFrameIndex,
12554  size_t* pLostAllocationCount)
12555 {
12556  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12557  size_t lostAllocationCount = 0;
12558  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12559  {
12560  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12561  VMA_ASSERT(pBlock);
12562  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12563  }
12564  if(pLostAllocationCount != VMA_NULL)
12565  {
12566  *pLostAllocationCount = lostAllocationCount;
12567  }
12568 }
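// Public counterpart (illustrative usage, assuming a created VmaPool 'pool'):
//
//   size_t lostCount = 0;
//   vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
//
// marks as lost every allocation in the pool that has not been used within
// the last frameInUseCount frames and reports how many were affected.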
12569 
12570 VkResult VmaBlockVector::CheckCorruption()
12571 {
12572  if(!IsCorruptionDetectionEnabled())
12573  {
12574  return VK_ERROR_FEATURE_NOT_PRESENT;
12575  }
12576 
12577  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12578  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12579  {
12580  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12581  VMA_ASSERT(pBlock);
12582  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12583  if(res != VK_SUCCESS)
12584  {
12585  return res;
12586  }
12587  }
12588  return VK_SUCCESS;
12589 }
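// Public counterpart (illustrative usage): checking all memory types at once -
//
//   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
//
// returns VK_ERROR_FEATURE_NOT_PRESENT when corruption detection is disabled,
// VK_SUCCESS when all inspected margins are intact, and a failure code
// (VK_ERROR_VALIDATION_FAILED_EXT) when a corrupted margin is found.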
12590 
12591 void VmaBlockVector::AddStats(VmaStats* pStats)
12592 {
12593  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12594  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12595 
12596  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12597 
12598  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12599  {
12600  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12601  VMA_ASSERT(pBlock);
12602  VMA_HEAVY_ASSERT(pBlock->Validate());
12603  VmaStatInfo allocationStatInfo;
12604  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12605  VmaAddStatInfo(pStats->total, allocationStatInfo);
12606  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12607  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12608  }
12609 }
12610 
12611 ////////////////////////////////////////////////////////////////////////////////
12612 // VmaDefragmentationAlgorithm_Generic members definition
12613 
12614 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12615  VmaAllocator hAllocator,
12616  VmaBlockVector* pBlockVector,
12617  uint32_t currentFrameIndex,
12618  bool overlappingMoveSupported) :
12619  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12620  m_AllocationCount(0),
12621  m_AllAllocations(false),
12622  m_BytesMoved(0),
12623  m_AllocationsMoved(0),
12624  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12625 {
12626  // Create block info for each block.
12627  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12628  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12629  {
12630  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12631  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12632  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12633  m_Blocks.push_back(pBlockInfo);
12634  }
12635 
12636  // Sort them by m_pBlock pointer value.
12637  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12638 }
12639 
12640 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12641 {
12642  for(size_t i = m_Blocks.size(); i--; )
12643  {
12644  vma_delete(m_hAllocator, m_Blocks[i]);
12645  }
12646 }
12647 
12648 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12649 {
12650  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
12651  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12652  {
12653  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12654  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12655  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12656  {
12657  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12658  (*it)->m_Allocations.push_back(allocInfo);
12659  }
12660  else
12661  {
12662  VMA_ASSERT(0);
12663  }
12664 
12665  ++m_AllocationCount;
12666  }
12667 }
12668 
12669 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12670  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12671  VkDeviceSize maxBytesToMove,
12672  uint32_t maxAllocationsToMove)
12673 {
12674  if(m_Blocks.empty())
12675  {
12676  return VK_SUCCESS;
12677  }
12678 
12679  // This is a choice based on research.
12680  // Option 1:
12681  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12682  // Option 2:
12683  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12684  // Option 3:
12685  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12686 
12687  size_t srcBlockMinIndex = 0;
12688  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12689  /*
12690  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12691  {
12692  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12693  if(blocksWithNonMovableCount > 0)
12694  {
12695  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12696  }
12697  }
12698  */
12699 
12700  size_t srcBlockIndex = m_Blocks.size() - 1;
12701  size_t srcAllocIndex = SIZE_MAX;
12702  for(;;)
12703  {
12704  // 1. Find next allocation to move.
12705  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12706  // 1.2. Then start from last to first m_Allocations.
12707  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12708  {
12709  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12710  {
12711  // Finished: no more allocations to process.
12712  if(srcBlockIndex == srcBlockMinIndex)
12713  {
12714  return VK_SUCCESS;
12715  }
12716  else
12717  {
12718  --srcBlockIndex;
12719  srcAllocIndex = SIZE_MAX;
12720  }
12721  }
12722  else
12723  {
12724  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12725  }
12726  }
12727 
12728  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12729  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12730 
12731  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12732  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12733  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12734  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12735 
12736  // 2. Try to find new place for this allocation in preceding or current block.
12737  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12738  {
12739  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12740  VmaAllocationRequest dstAllocRequest;
12741  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12742  m_CurrentFrameIndex,
12743  m_pBlockVector->GetFrameInUseCount(),
12744  m_pBlockVector->GetBufferImageGranularity(),
12745  size,
12746  alignment,
12747  false, // upperAddress
12748  suballocType,
12749  false, // canMakeOtherLost
12750  strategy,
12751  &dstAllocRequest) &&
12752  MoveMakesSense(
12753  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12754  {
12755  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12756 
12757  // Reached limit on number of allocations or bytes to move.
12758  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12759  (m_BytesMoved + size > maxBytesToMove))
12760  {
12761  return VK_SUCCESS;
12762  }
12763 
12764  VmaDefragmentationMove move;
12765  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12766  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12767  move.srcOffset = srcOffset;
12768  move.dstOffset = dstAllocRequest.offset;
12769  move.size = size;
12770  moves.push_back(move);
12771 
12772  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12773  dstAllocRequest,
12774  suballocType,
12775  size,
12776  allocInfo.m_hAllocation);
12777  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12778 
12779  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12780 
12781  if(allocInfo.m_pChanged != VMA_NULL)
12782  {
12783  *allocInfo.m_pChanged = VK_TRUE;
12784  }
12785 
12786  ++m_AllocationsMoved;
12787  m_BytesMoved += size;
12788 
12789  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12790 
12791  break;
12792  }
12793  }
12794 
12795  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12796 
12797  if(srcAllocIndex > 0)
12798  {
12799  --srcAllocIndex;
12800  }
12801  else
12802  {
12803  if(srcBlockIndex > 0)
12804  {
12805  --srcBlockIndex;
12806  srcAllocIndex = SIZE_MAX;
12807  }
12808  else
12809  {
12810  return VK_SUCCESS;
12811  }
12812  }
12813  }
12814 }
12815 
12816 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12817 {
12818  size_t result = 0;
12819  for(size_t i = 0; i < m_Blocks.size(); ++i)
12820  {
12821  if(m_Blocks[i]->m_HasNonMovableAllocations)
12822  {
12823  ++result;
12824  }
12825  }
12826  return result;
12827 }
12828 
12829 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12830  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12831  VkDeviceSize maxBytesToMove,
12832  uint32_t maxAllocationsToMove)
12833 {
12834  if(!m_AllAllocations && m_AllocationCount == 0)
12835  {
12836  return VK_SUCCESS;
12837  }
12838 
12839  const size_t blockCount = m_Blocks.size();
12840  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12841  {
12842  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12843 
12844  if(m_AllAllocations)
12845  {
12846  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12847  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12848  it != pMetadata->m_Suballocations.end();
12849  ++it)
12850  {
12851  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12852  {
12853  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12854  pBlockInfo->m_Allocations.push_back(allocInfo);
12855  }
12856  }
12857  }
12858 
12859  pBlockInfo->CalcHasNonMovableAllocations();
12860 
12861  // This is a choice based on research.
12862  // Option 1:
12863  pBlockInfo->SortAllocationsByOffsetDescending();
12864  // Option 2:
12865  //pBlockInfo->SortAllocationsBySizeDescending();
12866  }
12867 
12868  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12869  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12870 
12871  // This is a choice based on research.
12872  const uint32_t roundCount = 2;
12873 
12874  // Execute defragmentation rounds (the main part).
12875  VkResult result = VK_SUCCESS;
12876  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12877  {
12878  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12879  }
12880 
12881  return result;
12882 }
12883 
12884 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12885  size_t dstBlockIndex, VkDeviceSize dstOffset,
12886  size_t srcBlockIndex, VkDeviceSize srcOffset)
12887 {
12888  if(dstBlockIndex < srcBlockIndex)
12889  {
12890  return true;
12891  }
12892  if(dstBlockIndex > srcBlockIndex)
12893  {
12894  return false;
12895  }
12896  if(dstOffset < srcOffset)
12897  {
12898  return true;
12899  }
12900  return false;
12901 }
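// In effect this orders (blockIndex, offset) pairs lexicographically: moving
// from block 2 to block 0 makes sense, moving within block 1 from offset 4096
// down to offset 0 makes sense, but any move to a higher block index, or to a
// higher offset in the same block, is rejected because it would not compact
// the data.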
12902 
12903 ////////////////////////////////////////////////////////////////////////////////
12904 // VmaDefragmentationAlgorithm_Fast
12905 
12906 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12907  VmaAllocator hAllocator,
12908  VmaBlockVector* pBlockVector,
12909  uint32_t currentFrameIndex,
12910  bool overlappingMoveSupported) :
12911  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12912  m_OverlappingMoveSupported(overlappingMoveSupported),
12913  m_AllocationCount(0),
12914  m_AllAllocations(false),
12915  m_BytesMoved(0),
12916  m_AllocationsMoved(0),
12917  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12918 {
12919  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12920 
12921 }
12922 
12923 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12924 {
12925 }
12926 
12927 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12928  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12929  VkDeviceSize maxBytesToMove,
12930  uint32_t maxAllocationsToMove)
12931 {
12932  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12933 
12934  const size_t blockCount = m_pBlockVector->GetBlockCount();
12935  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12936  {
12937  return VK_SUCCESS;
12938  }
12939 
12940  PreprocessMetadata();
12941 
12942  // Sort blocks in order from most "destination" to most "source".
12943 
12944  m_BlockInfos.resize(blockCount);
12945  for(size_t i = 0; i < blockCount; ++i)
12946  {
12947  m_BlockInfos[i].origBlockIndex = i;
12948  }
12949 
12950  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12951  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12952  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12953  });
12954 
12955  // THE MAIN ALGORITHM
12956 
12957  FreeSpaceDatabase freeSpaceDb;
12958 
12959  size_t dstBlockInfoIndex = 0;
12960  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12961  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12962  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12963  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12964  VkDeviceSize dstOffset = 0;
12965 
12966  bool end = false;
12967  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12968  {
12969  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12970  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12971  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12972  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12973  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12974  {
12975  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12976  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12977  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12978  if(m_AllocationsMoved == maxAllocationsToMove ||
12979  m_BytesMoved + srcAllocSize > maxBytesToMove)
12980  {
12981  end = true;
12982  break;
12983  }
12984  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12985 
12986  // Try to place it in one of the free spaces from the database.
12987  size_t freeSpaceInfoIndex;
12988  VkDeviceSize dstAllocOffset;
12989  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12990  freeSpaceInfoIndex, dstAllocOffset))
12991  {
12992  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12993  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12994  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12995 
12996  // Same block
12997  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12998  {
12999  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13000 
13001  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13002 
13003  VmaSuballocation suballoc = *srcSuballocIt;
13004  suballoc.offset = dstAllocOffset;
13005  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13006  m_BytesMoved += srcAllocSize;
13007  ++m_AllocationsMoved;
13008 
13009  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13010  ++nextSuballocIt;
13011  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13012  srcSuballocIt = nextSuballocIt;
13013 
13014  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13015 
13016  VmaDefragmentationMove move = {
13017  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13018  srcAllocOffset, dstAllocOffset,
13019  srcAllocSize };
13020  moves.push_back(move);
13021  }
13022  // Different block
13023  else
13024  {
13025  // MOVE OPTION 2: Move the allocation to a different block.
13026 
13027  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13028 
13029  VmaSuballocation suballoc = *srcSuballocIt;
13030  suballoc.offset = dstAllocOffset;
13031  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13032  m_BytesMoved += srcAllocSize;
13033  ++m_AllocationsMoved;
13034 
13035  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13036  ++nextSuballocIt;
13037  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13038  srcSuballocIt = nextSuballocIt;
13039 
13040  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13041 
13042  VmaDefragmentationMove move = {
13043  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13044  srcAllocOffset, dstAllocOffset,
13045  srcAllocSize };
13046  moves.push_back(move);
13047  }
13048  }
13049  else
13050  {
13051  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13052 
13053  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13054  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13055  dstAllocOffset + srcAllocSize > dstBlockSize)
13056  {
13057  // But before that, register remaining free space at the end of dst block.
13058  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13059 
13060  ++dstBlockInfoIndex;
13061  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13062  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13063  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13064  dstBlockSize = pDstMetadata->GetSize();
13065  dstOffset = 0;
13066  dstAllocOffset = 0;
13067  }
13068 
13069  // Same block
13070  if(dstBlockInfoIndex == srcBlockInfoIndex)
13071  {
13072  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13073 
13074  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13075 
13076  bool skipOver = overlap;
13077  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13078  {
13079  // If destination and source regions overlap, skip the move if it would
13080  // shift the allocation by less than 1/64 of its size.
13081  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13082  }
13083 
13084  if(skipOver)
13085  {
13086  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13087 
13088  dstOffset = srcAllocOffset + srcAllocSize;
13089  ++srcSuballocIt;
13090  }
13091  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13092  else
13093  {
13094  srcSuballocIt->offset = dstAllocOffset;
13095  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13096  dstOffset = dstAllocOffset + srcAllocSize;
13097  m_BytesMoved += srcAllocSize;
13098  ++m_AllocationsMoved;
13099  ++srcSuballocIt;
13100  VmaDefragmentationMove move = {
13101  srcOrigBlockIndex, dstOrigBlockIndex,
13102  srcAllocOffset, dstAllocOffset,
13103  srcAllocSize };
13104  moves.push_back(move);
13105  }
13106  }
13107  // Different block
13108  else
13109  {
13110  // MOVE OPTION 2: Move the allocation to a different block.
13111 
13112  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13113  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13114 
13115  VmaSuballocation suballoc = *srcSuballocIt;
13116  suballoc.offset = dstAllocOffset;
13117  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13118  dstOffset = dstAllocOffset + srcAllocSize;
13119  m_BytesMoved += srcAllocSize;
13120  ++m_AllocationsMoved;
13121 
13122  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13123  ++nextSuballocIt;
13124  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13125  srcSuballocIt = nextSuballocIt;
13126 
13127  pDstMetadata->m_Suballocations.push_back(suballoc);
13128 
13129  VmaDefragmentationMove move = {
13130  srcOrigBlockIndex, dstOrigBlockIndex,
13131  srcAllocOffset, dstAllocOffset,
13132  srcAllocSize };
13133  moves.push_back(move);
13134  }
13135  }
13136  }
13137  }
13138 
13139  m_BlockInfos.clear();
13140 
13141  PostprocessMetadata();
13142 
13143  return VK_SUCCESS;
13144 }
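// Illustrative effect of the pass above: suballocations are visited in offset
// order and slid down to the lowest aligned offset that still fits, so each
// freed hole migrates toward the end of the block (or the allocation migrates
// to an earlier block entirely), leaving all remaining free space contiguous
// at the end - which is what makes a single forward pass sufficient.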
13145 
13146 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13147 {
13148  const size_t blockCount = m_pBlockVector->GetBlockCount();
13149  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13150  {
13151  VmaBlockMetadata_Generic* const pMetadata =
13152  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13153  pMetadata->m_FreeCount = 0;
13154  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13155  pMetadata->m_FreeSuballocationsBySize.clear();
13156  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13157  it != pMetadata->m_Suballocations.end(); )
13158  {
13159  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13160  {
13161  VmaSuballocationList::iterator nextIt = it;
13162  ++nextIt;
13163  pMetadata->m_Suballocations.erase(it);
13164  it = nextIt;
13165  }
13166  else
13167  {
13168  ++it;
13169  }
13170  }
13171  }
13172 }
13173 
13174 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13175 {
13176  const size_t blockCount = m_pBlockVector->GetBlockCount();
13177  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13178  {
13179  VmaBlockMetadata_Generic* const pMetadata =
13180  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13181  const VkDeviceSize blockSize = pMetadata->GetSize();
13182 
13183  // No allocations in this block - entire area is free.
13184  if(pMetadata->m_Suballocations.empty())
13185  {
13186  pMetadata->m_FreeCount = 1;
13187  //pMetadata->m_SumFreeSize is already set to blockSize.
13188  VmaSuballocation suballoc = {
13189  0, // offset
13190  blockSize, // size
13191  VMA_NULL, // hAllocation
13192  VMA_SUBALLOCATION_TYPE_FREE };
13193  pMetadata->m_Suballocations.push_back(suballoc);
13194  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13195  }
13196  // There are some allocations in this block.
13197  else
13198  {
13199  VkDeviceSize offset = 0;
13200  VmaSuballocationList::iterator it;
13201  for(it = pMetadata->m_Suballocations.begin();
13202  it != pMetadata->m_Suballocations.end();
13203  ++it)
13204  {
13205  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13206  VMA_ASSERT(it->offset >= offset);
13207 
13208  // Need to insert preceding free space.
13209  if(it->offset > offset)
13210  {
13211  ++pMetadata->m_FreeCount;
13212  const VkDeviceSize freeSize = it->offset - offset;
13213  VmaSuballocation suballoc = {
13214  offset, // offset
13215  freeSize, // size
13216  VMA_NULL, // hAllocation
13217  VMA_SUBALLOCATION_TYPE_FREE };
13218  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13219  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13220  {
13221  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13222  }
13223  }
13224 
13225  pMetadata->m_SumFreeSize -= it->size;
13226  offset = it->offset + it->size;
13227  }
13228 
13229  // Need to insert trailing free space.
13230  if(offset < blockSize)
13231  {
13232  ++pMetadata->m_FreeCount;
13233  const VkDeviceSize freeSize = blockSize - offset;
13234  VmaSuballocation suballoc = {
13235  offset, // offset
13236  freeSize, // size
13237  VMA_NULL, // hAllocation
13238  VMA_SUBALLOCATION_TYPE_FREE };
13239  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13240  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13241  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13242  {
13243  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13244  }
13245  }
13246 
13247  VMA_SORT(
13248  pMetadata->m_FreeSuballocationsBySize.begin(),
13249  pMetadata->m_FreeSuballocationsBySize.end(),
13250  VmaSuballocationItemSizeLess());
13251  }
13252 
13253  VMA_HEAVY_ASSERT(pMetadata->Validate());
13254  }
13255 }
13256 
13257 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13258 {
13259  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13260  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13261  while(it != pMetadata->m_Suballocations.end() &&
13262  it->offset < suballoc.offset)
13263  {
13264  // Insertion point is before the first suballocation at an offset
13265  // greater than or equal to suballoc.offset.
13266  ++it;
13267  }
13268  pMetadata->m_Suballocations.insert(it, suballoc);
13269 }
13270 
13271 ////////////////////////////////////////////////////////////////////////////////
13272 // VmaBlockVectorDefragmentationContext
13273 
13274 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13275  VmaAllocator hAllocator,
13276  VmaPool hCustomPool,
13277  VmaBlockVector* pBlockVector,
13278  uint32_t currFrameIndex,
13279  uint32_t algorithmFlags) :
13280  res(VK_SUCCESS),
13281  mutexLocked(false),
13282  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13283  m_hAllocator(hAllocator),
13284  m_hCustomPool(hCustomPool),
13285  m_pBlockVector(pBlockVector),
13286  m_CurrFrameIndex(currFrameIndex),
13287  m_AlgorithmFlags(algorithmFlags),
13288  m_pAlgorithm(VMA_NULL),
13289  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13290  m_AllAllocations(false)
13291 {
13292 }
13293 
13294 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13295 {
13296  vma_delete(m_hAllocator, m_pAlgorithm);
13297 }
13298 
13299 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13300 {
13301  AllocInfo info = { hAlloc, pChanged };
13302  m_Allocations.push_back(info);
13303 }
13304 
13305 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13306 {
13307  const bool allAllocations = m_AllAllocations ||
13308  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13309 
13310  /********************************
13311  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13312  ********************************/
13313 
13314  /*
13315  Fast algorithm is supported only when certain criteria are met:
13316  - VMA_DEBUG_MARGIN is 0.
13317  - All allocations in this block vector are moveable.
13318  - There is no possibility of image/buffer granularity conflict.
13319  */
13320  if(VMA_DEBUG_MARGIN == 0 &&
13321  allAllocations &&
13322  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13323  {
13324  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13325  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13326  }
13327  else
13328  {
13329  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13330  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13331  }
13332 
13333  if(allAllocations)
13334  {
13335  m_pAlgorithm->AddAll();
13336  }
13337  else
13338  {
13339  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13340  {
13341  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13342  }
13343  }
13344 }
13345 
13346 //////////////////////////////////////////////////////////////////////////
13347 // VmaDefragmentationContext
13348 
13349 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13350  VmaAllocator hAllocator,
13351  uint32_t currFrameIndex,
13352  uint32_t flags,
13353  VmaDefragmentationStats* pStats) :
13354  m_hAllocator(hAllocator),
13355  m_CurrFrameIndex(currFrameIndex),
13356  m_Flags(flags),
13357  m_pStats(pStats),
13358  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13359 {
13360  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13361 }
13362 
13363 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13364 {
13365  for(size_t i = m_CustomPoolContexts.size(); i--; )
13366  {
13367  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13368  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13369  vma_delete(m_hAllocator, pBlockVectorCtx);
13370  }
13371  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13372  {
13373  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13374  if(pBlockVectorCtx)
13375  {
13376  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13377  vma_delete(m_hAllocator, pBlockVectorCtx);
13378  }
13379  }
13380 }
13381 
13382 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13383 {
13384  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13385  {
13386  VmaPool pool = pPools[poolIndex];
13387  VMA_ASSERT(pool);
13388  // Pools with an algorithm other than the default are not defragmented.
13389  if(pool->m_BlockVector.GetAlgorithm() == 0)
13390  {
13391  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13392 
13393  for(size_t i = m_CustomPoolContexts.size(); i--; )
13394  {
13395  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13396  {
13397  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13398  break;
13399  }
13400  }
13401 
13402  if(!pBlockVectorDefragCtx)
13403  {
13404  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13405  m_hAllocator,
13406  pool,
13407  &pool->m_BlockVector,
13408  m_CurrFrameIndex,
13409  m_Flags);
13410  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13411  }
13412 
13413  pBlockVectorDefragCtx->AddAll();
13414  }
13415  }
13416 }
13417 
13418 void VmaDefragmentationContext_T::AddAllocations(
13419  uint32_t allocationCount,
13420  VmaAllocation* pAllocations,
13421  VkBool32* pAllocationsChanged)
13422 {
13423  // Dispatch pAllocations among defragmentators. Create them when necessary.
13424  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13425  {
13426  const VmaAllocation hAlloc = pAllocations[allocIndex];
13427  VMA_ASSERT(hAlloc);
13428  // DedicatedAlloc cannot be defragmented.
13429  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13430  // Lost allocation cannot be defragmented.
13431  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13432  {
13433  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13434 
13435  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13436  // This allocation belongs to a custom pool.
13437  if(hAllocPool != VK_NULL_HANDLE)
13438  {
13439  // Pools with an algorithm other than the default are not defragmented.
13440  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13441  {
13442  for(size_t i = m_CustomPoolContexts.size(); i--; )
13443  {
13444  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13445  {
13446  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13447  break;
13448  }
13449  }
13450  if(!pBlockVectorDefragCtx)
13451  {
13452  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13453  m_hAllocator,
13454  hAllocPool,
13455  &hAllocPool->m_BlockVector,
13456  m_CurrFrameIndex,
13457  m_Flags);
13458  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13459  }
13460  }
13461  }
13462  // This allocation belongs to the default pool.
13463  else
13464  {
13465  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13466  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13467  if(!pBlockVectorDefragCtx)
13468  {
13469  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13470  m_hAllocator,
13471  VMA_NULL, // hCustomPool
13472  m_hAllocator->m_pBlockVectors[memTypeIndex],
13473  m_CurrFrameIndex,
13474  m_Flags);
13475  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13476  }
13477  }
13478 
13479  if(pBlockVectorDefragCtx)
13480  {
13481  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13482  &pAllocationsChanged[allocIndex] : VMA_NULL;
13483  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13484  }
13485  }
13486  }
13487 }
13488 
13489 VkResult VmaDefragmentationContext_T::Defragment(
13490  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13491  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13492  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13493 {
13494  if(pStats)
13495  {
13496  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13497  }
13498 
13499  if(commandBuffer == VK_NULL_HANDLE)
13500  {
13501  maxGpuBytesToMove = 0;
13502  maxGpuAllocationsToMove = 0;
13503  }
13504 
13505  VkResult res = VK_SUCCESS;
13506 
13507  // Process default pools.
13508  for(uint32_t memTypeIndex = 0;
13509  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13510  ++memTypeIndex)
13511  {
13512  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13513  if(pBlockVectorCtx)
13514  {
13515  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13516  pBlockVectorCtx->GetBlockVector()->Defragment(
13517  pBlockVectorCtx,
13518  pStats,
13519  maxCpuBytesToMove, maxCpuAllocationsToMove,
13520  maxGpuBytesToMove, maxGpuAllocationsToMove,
13521  commandBuffer);
13522  if(pBlockVectorCtx->res != VK_SUCCESS)
13523  {
13524  res = pBlockVectorCtx->res;
13525  }
13526  }
13527  }
13528 
13529  // Process custom pools.
13530  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13531  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13532  ++customCtxIndex)
13533  {
13534  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13535  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13536  pBlockVectorCtx->GetBlockVector()->Defragment(
13537  pBlockVectorCtx,
13538  pStats,
13539  maxCpuBytesToMove, maxCpuAllocationsToMove,
13540  maxGpuBytesToMove, maxGpuAllocationsToMove,
13541  commandBuffer);
13542  if(pBlockVectorCtx->res != VK_SUCCESS)
13543  {
13544  res = pBlockVectorCtx->res;
13545  }
13546  }
13547 
13548  return res;
13549 }
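// [Illustrative sketch, not part of the original source.] How an application
// reaches the Defragment() loops above through the public API, assuming an
// already created `allocator` and an `allocs` array of movable allocations.
// Passing VK_NULL_HANDLE as VmaDefragmentationInfo2::commandBuffer zeroes the
// GPU limits above, so only CPU-side moves are performed.
#if 0
enum { ALLOC_COUNT = 16 };
VkBool32 changed[ALLOC_COUNT] = {};

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = ALLOC_COUNT;
defragInfo.pAllocations = allocs;
defragInfo.pAllocationsChanged = changed;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no count limit
defragInfo.commandBuffer = VK_NULL_HANDLE;       // CPU-only defragmentation

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
// Buffers/images bound to allocations with changed[i] == VK_TRUE must be
// destroyed, recreated, and rebound by the caller.
#endif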
13550 
13551 //////////////////////////////////////////////////////////////////////////
13552 // VmaRecorder
13553 
13554 #if VMA_RECORDING_ENABLED
13555 
13556 VmaRecorder::VmaRecorder() :
13557  m_UseMutex(true),
13558  m_Flags(0),
13559  m_File(VMA_NULL),
13560  m_Freq(INT64_MAX),
13561  m_StartCounter(INT64_MAX)
13562 {
13563 }
13564 
13565 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13566 {
13567  m_UseMutex = useMutex;
13568  m_Flags = settings.flags;
13569 
13570  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13571  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13572 
13573  // Open file for writing.
13574  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13575  if(err != 0)
13576  {
13577  return VK_ERROR_INITIALIZATION_FAILED;
13578  }
13579 
13580  // Write header.
13581  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13582  fprintf(m_File, "%s\n", "1,5");
13583 
13584  return VK_SUCCESS;
13585 }
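// [Illustrative sketch, not part of the original source.] Recording is enabled
// by the application at allocator creation; this is the path by which the
// settings reach Init() above. The struct, flag, and function names are the
// library's own; the file path is just an example.
#if 0
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after every call
recordSettings.pFilePath = "vma_capture.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pRecordSettings = &recordSettings; // requires VMA_RECORDING_ENABLED == 1

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
#endif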
13586 
13587 VmaRecorder::~VmaRecorder()
13588 {
13589  if(m_File != VMA_NULL)
13590  {
13591  fclose(m_File);
13592  }
13593 }
13594 
13595 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13596 {
13597  CallParams callParams;
13598  GetBasicParams(callParams);
13599 
13600  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13601  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13602  Flush();
13603 }
13604 
13605 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13606 {
13607  CallParams callParams;
13608  GetBasicParams(callParams);
13609 
13610  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13611  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13612  Flush();
13613 }
13614 
13615 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13616 {
13617  CallParams callParams;
13618  GetBasicParams(callParams);
13619 
13620  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13621  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13622  createInfo.memoryTypeIndex,
13623  createInfo.flags,
13624  createInfo.blockSize,
13625  (uint64_t)createInfo.minBlockCount,
13626  (uint64_t)createInfo.maxBlockCount,
13627  createInfo.frameInUseCount,
13628  pool);
13629  Flush();
13630 }
13631 
13632 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13633 {
13634  CallParams callParams;
13635  GetBasicParams(callParams);
13636 
13637  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13638  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13639  pool);
13640  Flush();
13641 }
13642 
13643 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13644  const VkMemoryRequirements& vkMemReq,
13645  const VmaAllocationCreateInfo& createInfo,
13646  VmaAllocation allocation)
13647 {
13648  CallParams callParams;
13649  GetBasicParams(callParams);
13650 
13651  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13652  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13653  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13654  vkMemReq.size,
13655  vkMemReq.alignment,
13656  vkMemReq.memoryTypeBits,
13657  createInfo.flags,
13658  createInfo.usage,
13659  createInfo.requiredFlags,
13660  createInfo.preferredFlags,
13661  createInfo.memoryTypeBits,
13662  createInfo.pool,
13663  allocation,
13664  userDataStr.GetString());
13665  Flush();
13666 }
13667 
13668 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13669  const VkMemoryRequirements& vkMemReq,
13670  const VmaAllocationCreateInfo& createInfo,
13671  uint64_t allocationCount,
13672  const VmaAllocation* pAllocations)
13673 {
13674  CallParams callParams;
13675  GetBasicParams(callParams);
13676 
13677  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13678  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13679  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13680  vkMemReq.size,
13681  vkMemReq.alignment,
13682  vkMemReq.memoryTypeBits,
13683  createInfo.flags,
13684  createInfo.usage,
13685  createInfo.requiredFlags,
13686  createInfo.preferredFlags,
13687  createInfo.memoryTypeBits,
13688  createInfo.pool);
13689  PrintPointerList(allocationCount, pAllocations);
13690  fprintf(m_File, ",%s\n", userDataStr.GetString());
13691  Flush();
13692 }
13693 
13694 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13695  const VkMemoryRequirements& vkMemReq,
13696  bool requiresDedicatedAllocation,
13697  bool prefersDedicatedAllocation,
13698  const VmaAllocationCreateInfo& createInfo,
13699  VmaAllocation allocation)
13700 {
13701  CallParams callParams;
13702  GetBasicParams(callParams);
13703 
13704  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13705  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13706  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13707  vkMemReq.size,
13708  vkMemReq.alignment,
13709  vkMemReq.memoryTypeBits,
13710  requiresDedicatedAllocation ? 1 : 0,
13711  prefersDedicatedAllocation ? 1 : 0,
13712  createInfo.flags,
13713  createInfo.usage,
13714  createInfo.requiredFlags,
13715  createInfo.preferredFlags,
13716  createInfo.memoryTypeBits,
13717  createInfo.pool,
13718  allocation,
13719  userDataStr.GetString());
13720  Flush();
13721 }
13722 
13723 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13724  const VkMemoryRequirements& vkMemReq,
13725  bool requiresDedicatedAllocation,
13726  bool prefersDedicatedAllocation,
13727  const VmaAllocationCreateInfo& createInfo,
13728  VmaAllocation allocation)
13729 {
13730  CallParams callParams;
13731  GetBasicParams(callParams);
13732 
13733  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13734  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13735  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13736  vkMemReq.size,
13737  vkMemReq.alignment,
13738  vkMemReq.memoryTypeBits,
13739  requiresDedicatedAllocation ? 1 : 0,
13740  prefersDedicatedAllocation ? 1 : 0,
13741  createInfo.flags,
13742  createInfo.usage,
13743  createInfo.requiredFlags,
13744  createInfo.preferredFlags,
13745  createInfo.memoryTypeBits,
13746  createInfo.pool,
13747  allocation,
13748  userDataStr.GetString());
13749  Flush();
13750 }
13751 
13752 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13753  VmaAllocation allocation)
13754 {
13755  CallParams callParams;
13756  GetBasicParams(callParams);
13757 
13758  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13759  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13760  allocation);
13761  Flush();
13762 }
13763 
13764 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13765  uint64_t allocationCount,
13766  const VmaAllocation* pAllocations)
13767 {
13768  CallParams callParams;
13769  GetBasicParams(callParams);
13770 
13771  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13772  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13773  PrintPointerList(allocationCount, pAllocations);
13774  fprintf(m_File, "\n");
13775  Flush();
13776 }
13777 
13778 void VmaRecorder::RecordResizeAllocation(
13779  uint32_t frameIndex,
13780  VmaAllocation allocation,
13781  VkDeviceSize newSize)
13782 {
13783  CallParams callParams;
13784  GetBasicParams(callParams);
13785 
13786  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13787  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13788  allocation, newSize);
13789  Flush();
13790 }
13791 
13792 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13793  VmaAllocation allocation,
13794  const void* pUserData)
13795 {
13796  CallParams callParams;
13797  GetBasicParams(callParams);
13798 
13799  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13800  UserDataString userDataStr(
13801  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13802  pUserData);
13803  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13804  allocation,
13805  userDataStr.GetString());
13806  Flush();
13807 }
13808 
13809 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13810  VmaAllocation allocation)
13811 {
13812  CallParams callParams;
13813  GetBasicParams(callParams);
13814 
13815  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13816  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13817  allocation);
13818  Flush();
13819 }
13820 
13821 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13822  VmaAllocation allocation)
13823 {
13824  CallParams callParams;
13825  GetBasicParams(callParams);
13826 
13827  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13828  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13829  allocation);
13830  Flush();
13831 }
13832 
13833 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13834  VmaAllocation allocation)
13835 {
13836  CallParams callParams;
13837  GetBasicParams(callParams);
13838 
13839  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13840  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13841  allocation);
13842  Flush();
13843 }
13844 
13845 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13846  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13847 {
13848  CallParams callParams;
13849  GetBasicParams(callParams);
13850 
13851  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13852  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13853  allocation,
13854  offset,
13855  size);
13856  Flush();
13857 }
13858 
13859 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13860  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13861 {
13862  CallParams callParams;
13863  GetBasicParams(callParams);
13864 
13865  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13866  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13867  allocation,
13868  offset,
13869  size);
13870  Flush();
13871 }
13872 
13873 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13874  const VkBufferCreateInfo& bufCreateInfo,
13875  const VmaAllocationCreateInfo& allocCreateInfo,
13876  VmaAllocation allocation)
13877 {
13878  CallParams callParams;
13879  GetBasicParams(callParams);
13880 
13881  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13882  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13883  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13884  bufCreateInfo.flags,
13885  bufCreateInfo.size,
13886  bufCreateInfo.usage,
13887  bufCreateInfo.sharingMode,
13888  allocCreateInfo.flags,
13889  allocCreateInfo.usage,
13890  allocCreateInfo.requiredFlags,
13891  allocCreateInfo.preferredFlags,
13892  allocCreateInfo.memoryTypeBits,
13893  allocCreateInfo.pool,
13894  allocation,
13895  userDataStr.GetString());
13896  Flush();
13897 }
13898 
13899 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13900  const VkImageCreateInfo& imageCreateInfo,
13901  const VmaAllocationCreateInfo& allocCreateInfo,
13902  VmaAllocation allocation)
13903 {
13904  CallParams callParams;
13905  GetBasicParams(callParams);
13906 
13907  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13908  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13909  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13910  imageCreateInfo.flags,
13911  imageCreateInfo.imageType,
13912  imageCreateInfo.format,
13913  imageCreateInfo.extent.width,
13914  imageCreateInfo.extent.height,
13915  imageCreateInfo.extent.depth,
13916  imageCreateInfo.mipLevels,
13917  imageCreateInfo.arrayLayers,
13918  imageCreateInfo.samples,
13919  imageCreateInfo.tiling,
13920  imageCreateInfo.usage,
13921  imageCreateInfo.sharingMode,
13922  imageCreateInfo.initialLayout,
13923  allocCreateInfo.flags,
13924  allocCreateInfo.usage,
13925  allocCreateInfo.requiredFlags,
13926  allocCreateInfo.preferredFlags,
13927  allocCreateInfo.memoryTypeBits,
13928  allocCreateInfo.pool,
13929  allocation,
13930  userDataStr.GetString());
13931  Flush();
13932 }
13933 
13934 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13935  VmaAllocation allocation)
13936 {
13937  CallParams callParams;
13938  GetBasicParams(callParams);
13939 
13940  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13941  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13942  allocation);
13943  Flush();
13944 }
13945 
13946 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13947  VmaAllocation allocation)
13948 {
13949  CallParams callParams;
13950  GetBasicParams(callParams);
13951 
13952  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13953  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13954  allocation);
13955  Flush();
13956 }
13957 
13958 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13959  VmaAllocation allocation)
13960 {
13961  CallParams callParams;
13962  GetBasicParams(callParams);
13963 
13964  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13965  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13966  allocation);
13967  Flush();
13968 }
13969 
13970 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13971  VmaAllocation allocation)
13972 {
13973  CallParams callParams;
13974  GetBasicParams(callParams);
13975 
13976  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13977  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13978  allocation);
13979  Flush();
13980 }
13981 
13982 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13983  VmaPool pool)
13984 {
13985  CallParams callParams;
13986  GetBasicParams(callParams);
13987 
13988  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13989  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13990  pool);
13991  Flush();
13992 }
13993 
13994 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13995  const VmaDefragmentationInfo2& info,
13996  VmaDefragmentationContext ctx)
13997 {
13998  CallParams callParams;
13999  GetBasicParams(callParams);
14000 
14001  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14002  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14003  info.flags);
14004  PrintPointerList(info.allocationCount, info.pAllocations);
14005  fprintf(m_File, ",");
14006  PrintPointerList(info.poolCount, info.pPools);
14007  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14008  info.maxCpuBytesToMove,
14009  info.maxCpuAllocationsToMove,
14010  info.maxGpuBytesToMove,
14011  info.maxGpuAllocationsToMove,
14012  info.commandBuffer,
14013  ctx);
14014  Flush();
14015 }
14016 
14017 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14018  VmaDefragmentationContext ctx)
14019 {
14020  CallParams callParams;
14021  GetBasicParams(callParams);
14022 
14023  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14024  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14025  ctx);
14026  Flush();
14027 }
14028 
14029 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14030 {
14031  if(pUserData != VMA_NULL)
14032  {
14033  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14034  {
14035  m_Str = (const char*)pUserData;
14036  }
14037  else
14038  {
14039  sprintf_s(m_PtrStr, "%p", pUserData);
14040  m_Str = m_PtrStr;
14041  }
14042  }
14043  else
14044  {
14045  m_Str = "";
14046  }
14047 }
14048 
14049 void VmaRecorder::WriteConfiguration(
14050  const VkPhysicalDeviceProperties& devProps,
14051  const VkPhysicalDeviceMemoryProperties& memProps,
14052  bool dedicatedAllocationExtensionEnabled)
14053 {
14054  fprintf(m_File, "Config,Begin\n");
14055 
14056  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14057  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14058  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14059  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14060  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14061  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14062 
14063  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14064  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14065  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14066 
14067  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14068  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14069  {
14070  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14071  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14072  }
14073  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14074  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14075  {
14076  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14077  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14078  }
14079 
14080  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14081 
14082  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14083  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14084  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14085  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14086  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14087  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14088  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14089  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14090  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14091 
14092  fprintf(m_File, "Config,End\n");
14093 }
14094 
14095 void VmaRecorder::GetBasicParams(CallParams& outParams)
14096 {
14097  outParams.threadId = GetCurrentThreadId();
14098 
14099  LARGE_INTEGER counter;
14100  QueryPerformanceCounter(&counter);
14101  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14102 }
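// [Illustrative note, not part of the original source.] The "time" column in
// the recorded CSV is therefore seconds elapsed since Init() captured
// m_StartCounter: for example, with a QueryPerformanceCounter frequency of
// 10,000,000 ticks/s, a delta of 25,000,000 ticks prints as 2.500 via the
// "%.3f" format used above.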
14103 
14104 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14105 {
14106  if(count)
14107  {
14108  fprintf(m_File, "%p", pItems[0]);
14109  for(uint64_t i = 1; i < count; ++i)
14110  {
14111  fprintf(m_File, " %p", pItems[i]);
14112  }
14113  }
14114 }
14115 
14116 void VmaRecorder::Flush()
14117 {
14118  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14119  {
14120  fflush(m_File);
14121  }
14122 }
14123 
14124 #endif // #if VMA_RECORDING_ENABLED
14125 
14126 //////////////////////////////////////////////////////////////////////////
14127 // VmaAllocationObjectAllocator
14128 
14129 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14130  m_Allocator(pAllocationCallbacks, 1024)
14131 {
14132 }
14133 
14134 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14135 {
14136  VmaMutexLock mutexLock(m_Mutex);
14137  return m_Allocator.Alloc();
14138 }
14139 
14140 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14141 {
14142  VmaMutexLock mutexLock(m_Mutex);
14143  m_Allocator.Free(hAlloc);
14144 }
14145 
14146 //////////////////////////////////////////////////////////////////////////
14147 // VmaAllocator_T
14148 
14149 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14150  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14151  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14152  m_hDevice(pCreateInfo->device),
14153  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14154  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14155  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14156  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14157  m_PreferredLargeHeapBlockSize(0),
14158  m_PhysicalDevice(pCreateInfo->physicalDevice),
14159  m_CurrentFrameIndex(0),
14160  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14161  m_NextPoolId(0)
14162 #if VMA_RECORDING_ENABLED
14163  ,m_pRecorder(VMA_NULL)
14164 #endif
14165 {
14166  if(VMA_DEBUG_DETECT_CORRUPTION)
14167  {
14168  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14169  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14170  }
14171 
14172  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14173 
14174 #if !(VMA_DEDICATED_ALLOCATION)
14175  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14176  {
14177  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14178  }
14179 #endif
14180 
14181  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14182  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14183  memset(&m_MemProps, 0, sizeof(m_MemProps));
14184 
14185  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14186  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14187 
14188  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14189  {
14190  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14191  }
14192 
14193  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14194  {
14195  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14196  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14197  }
14198 
14199  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14200 
14201  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14202  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14203 
14204  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14205  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14206  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14207  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14208 
14209  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14210  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14211 
14212  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14213  {
14214  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14215  {
14216  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14217  if(limit != VK_WHOLE_SIZE)
14218  {
14219  m_HeapSizeLimit[heapIndex] = limit;
14220  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14221  {
14222  m_MemProps.memoryHeaps[heapIndex].size = limit;
14223  }
14224  }
14225  }
14226  }
14227 
14228  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14229  {
14230  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14231 
14232  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14233  this,
14234  VK_NULL_HANDLE, // hParentPool
14235  memTypeIndex,
14236  preferredBlockSize,
14237  0,
14238  SIZE_MAX,
14239  GetBufferImageGranularity(),
14240  pCreateInfo->frameInUseCount,
14241  false, // isCustomPool
14242  false, // explicitBlockSize
14243  false); // linearAlgorithm
14244  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14245  // because minBlockCount is 0.
14246  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14247 
14248  }
14249 }
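// [Illustrative sketch, not part of the original source.] The pHeapSizeLimit
// handling in the constructor above can be exercised like this, assuming
// `physicalDevice` and `device` already exist. Heaps left at VK_WHOLE_SIZE
// remain unlimited.
#if 0
VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapLimits[i] = VK_WHOLE_SIZE;        // no limit by default
heapLimits[0] = 512ull * 1024 * 1024;     // pretend heap 0 has only 512 MiB

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapLimits;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
#endif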
14250 
14251 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14252 {
14253  VkResult res = VK_SUCCESS;
14254 
14255  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14256  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14257  {
14258 #if VMA_RECORDING_ENABLED
14259  m_pRecorder = vma_new(this, VmaRecorder)();
14260  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14261  if(res != VK_SUCCESS)
14262  {
14263  return res;
14264  }
14265  m_pRecorder->WriteConfiguration(
14266  m_PhysicalDeviceProperties,
14267  m_MemProps,
14268  m_UseKhrDedicatedAllocation);
14269  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14270 #else
14271  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14272  return VK_ERROR_FEATURE_NOT_PRESENT;
14273 #endif
14274  }
14275 
14276  return res;
14277 }
14278 
14279 VmaAllocator_T::~VmaAllocator_T()
14280 {
14281 #if VMA_RECORDING_ENABLED
14282  if(m_pRecorder != VMA_NULL)
14283  {
14284  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14285  vma_delete(this, m_pRecorder);
14286  }
14287 #endif
14288 
14289  VMA_ASSERT(m_Pools.empty());
14290 
14291  for(size_t i = GetMemoryTypeCount(); i--; )
14292  {
14293  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14294  {
14295  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14296  }
14297 
14298  vma_delete(this, m_pDedicatedAllocations[i]);
14299  vma_delete(this, m_pBlockVectors[i]);
14300  }
14301 }
14302 
14303 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14304 {
14305 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14306  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14307  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14308  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14309  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14310  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14311  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14312  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14313  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14314  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14315  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14316  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14317  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14318  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14319  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14320  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14321  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14322  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14323 #if VMA_DEDICATED_ALLOCATION
14324  if(m_UseKhrDedicatedAllocation)
14325  {
14326  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14327  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14328  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14329  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14330  }
14331 #endif // #if VMA_DEDICATED_ALLOCATION
14332 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14333 
14334 #define VMA_COPY_IF_NOT_NULL(funcName) \
14335  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14336 
14337  if(pVulkanFunctions != VMA_NULL)
14338  {
14339  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14340  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14341  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14342  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14343  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14344  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14345  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14346  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14347  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14348  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14349  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14350  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14351  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14352  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14353  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14354  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14355  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14356 #if VMA_DEDICATED_ALLOCATION
14357  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14358  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14359 #endif
14360  }
14361 
14362 #undef VMA_COPY_IF_NOT_NULL
14363 
14364  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14365  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14366  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14367  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14368  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14369  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14370  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14371  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14372  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14373  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14374  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14375  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14376  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14377  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14378  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14379  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14380  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14381  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14382  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14383 #if VMA_DEDICATED_ALLOCATION
14384  if(m_UseKhrDedicatedAllocation)
14385  {
14386  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14387  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14388  }
14389 #endif
14390 }
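// [Illustrative sketch, not part of the original source.] When the library is
// built with VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the application
// satisfies the asserts above by supplying the entry points itself, e.g. ones
// obtained from a loader (shown here with statically linked symbols):
#if 0
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
vulkanFunctions.vkMapMemory = vkMapMemory;
vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
vulkanFunctions.vkBindBufferMemory = vkBindBufferMemory;
vulkanFunctions.vkBindImageMemory = vkBindImageMemory;
vulkanFunctions.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
vulkanFunctions.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
vulkanFunctions.vkCreateBuffer = vkCreateBuffer;
vulkanFunctions.vkDestroyBuffer = vkDestroyBuffer;
vulkanFunctions.vkCreateImage = vkCreateImage;
vulkanFunctions.vkDestroyImage = vkDestroyImage;
vulkanFunctions.vkCmdCopyBuffer = vkCmdCopyBuffer;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
#endif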
14391 
14392 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14393 {
14394  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14395  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14396  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14397  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14398 }
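// [Illustrative note, not part of the original source.] With the default,
// overridable macro values (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 256 MiB heap counts as small
// and gets 256 / 8 = 32 MiB blocks, while an 8 GiB heap gets 256 MiB blocks.
// A nonzero VmaAllocatorCreateInfo::preferredLargeHeapBlockSize replaces the
// latter value.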
14399 
14400 VkResult VmaAllocator_T::AllocateMemoryOfType(
14401  VkDeviceSize size,
14402  VkDeviceSize alignment,
14403  bool dedicatedAllocation,
14404  VkBuffer dedicatedBuffer,
14405  VkImage dedicatedImage,
14406  const VmaAllocationCreateInfo& createInfo,
14407  uint32_t memTypeIndex,
14408  VmaSuballocationType suballocType,
14409  size_t allocationCount,
14410  VmaAllocation* pAllocations)
14411 {
14412  VMA_ASSERT(pAllocations != VMA_NULL);
14413  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14414 
14415  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14416 
14417  // If memory type is not HOST_VISIBLE, disable MAPPED.
14418  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14419  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14420  {
14421  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14422  }
14423 
14424  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14425  VMA_ASSERT(blockVector);
14426 
14427  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14428  bool preferDedicatedMemory =
14429  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14430  dedicatedAllocation ||
14431  // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
14432  size > preferredBlockSize / 2;
14433 
14434  if(preferDedicatedMemory &&
14435  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14436  finalCreateInfo.pool == VK_NULL_HANDLE)
14437  {
14438  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14439  }
14440 
14441  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14442  {
14443  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14444  {
14445  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14446  }
14447  else
14448  {
14449  return AllocateDedicatedMemory(
14450  size,
14451  suballocType,
14452  memTypeIndex,
14453  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14454  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14455  finalCreateInfo.pUserData,
14456  dedicatedBuffer,
14457  dedicatedImage,
14458  allocationCount,
14459  pAllocations);
14460  }
14461  }
14462  else
14463  {
14464  VkResult res = blockVector->Allocate(
14465  m_CurrentFrameIndex.load(),
14466  size,
14467  alignment,
14468  finalCreateInfo,
14469  suballocType,
14470  allocationCount,
14471  pAllocations);
14472  if(res == VK_SUCCESS)
14473  {
14474  return res;
14475  }
14476 
14477  // Block vector allocation failed: Try dedicated memory as a fallback.
14478  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14479  {
14480  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14481  }
14482  else
14483  {
14484  res = AllocateDedicatedMemory(
14485  size,
14486  suballocType,
14487  memTypeIndex,
14488  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14489  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14490  finalCreateInfo.pUserData,
14491  dedicatedBuffer,
14492  dedicatedImage,
14493  allocationCount,
14494  pAllocations);
14495  if(res == VK_SUCCESS)
14496  {
14497  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14498  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14499  return VK_SUCCESS;
14500  }
14501  else
14502  {
14503  // Everything failed: Return error code.
14504  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14505  return res;
14506  }
14507  }
14508  }
14509 }
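// [Illustrative sketch, not part of the original source.] Two ways to reach the
// dedicated path above, assuming a valid `allocator`: implicitly, by requesting
// more than half of the preferred block size (e.g. 200 MiB against a 256 MiB
// default block), or explicitly via the DEDICATED_MEMORY flag, as here:
#if 0
VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = 200ull * 1024 * 1024;                // > preferredBlockSize / 2
bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // force it

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
#endif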
14510 
14511 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14512  VkDeviceSize size,
14513  VmaSuballocationType suballocType,
14514  uint32_t memTypeIndex,
14515  bool map,
14516  bool isUserDataString,
14517  void* pUserData,
14518  VkBuffer dedicatedBuffer,
14519  VkImage dedicatedImage,
14520  size_t allocationCount,
14521  VmaAllocation* pAllocations)
14522 {
14523  VMA_ASSERT(allocationCount > 0 && pAllocations);
14524 
14525  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14526  allocInfo.memoryTypeIndex = memTypeIndex;
14527  allocInfo.allocationSize = size;
14528 
14529 #if VMA_DEDICATED_ALLOCATION
14530  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14531  if(m_UseKhrDedicatedAllocation)
14532  {
14533  if(dedicatedBuffer != VK_NULL_HANDLE)
14534  {
14535  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14536  dedicatedAllocInfo.buffer = dedicatedBuffer;
14537  allocInfo.pNext = &dedicatedAllocInfo;
14538  }
14539  else if(dedicatedImage != VK_NULL_HANDLE)
14540  {
14541  dedicatedAllocInfo.image = dedicatedImage;
14542  allocInfo.pNext = &dedicatedAllocInfo;
14543  }
14544  }
14545 #endif // #if VMA_DEDICATED_ALLOCATION
14546 
14547  size_t allocIndex;
14548  VkResult res = VK_SUCCESS;
14549  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14550  {
14551  res = AllocateDedicatedMemoryPage(
14552  size,
14553  suballocType,
14554  memTypeIndex,
14555  allocInfo,
14556  map,
14557  isUserDataString,
14558  pUserData,
14559  pAllocations + allocIndex);
14560  if(res != VK_SUCCESS)
14561  {
14562  break;
14563  }
14564  }
14565 
14566  if(res == VK_SUCCESS)
14567  {
14568  // Register them in m_pDedicatedAllocations.
14569  {
14570  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14571  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14572  VMA_ASSERT(pDedicatedAllocations);
14573  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14574  {
14575  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14576  }
14577  }
14578 
14579  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14580  }
14581  else
14582  {
14583  // Free all already created allocations.
14584  while(allocIndex--)
14585  {
14586  VmaAllocation currAlloc = pAllocations[allocIndex];
14587  VkDeviceMemory hMemory = currAlloc->GetMemory();
14588 
14589  /*
14590  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14591  before vkFreeMemory.
14592 
14593  if(currAlloc->GetMappedData() != VMA_NULL)
14594  {
14595  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14596  }
14597  */
14598 
14599  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14600 
14601  currAlloc->SetUserData(this, VMA_NULL);
14602  currAlloc->Dtor();
14603  m_AllocationObjectAllocator.Free(currAlloc);
14604  }
14605 
14606  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14607  }
14608 
14609  return res;
14610 }
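// [Illustrative sketch, not part of the original source.] The rollback in the
// failure branch above is what gives vmaAllocateMemoryPages() its
// all-or-nothing behavior: if any page fails, every page already created is
// freed and the whole call reports the error. `allocator` and `memReq` are
// assumed to exist.
#if 0
enum { PAGE_COUNT = 8 };
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation pages[PAGE_COUNT] = {};
VkResult res = vmaAllocateMemoryPages(
    allocator, &memReq, &allocCreateInfo, PAGE_COUNT, pages, nullptr);
// On failure, pages[] is left zero-filled (see the memset in the code above).
#endif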
14611 
14612 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14613  VkDeviceSize size,
14614  VmaSuballocationType suballocType,
14615  uint32_t memTypeIndex,
14616  const VkMemoryAllocateInfo& allocInfo,
14617  bool map,
14618  bool isUserDataString,
14619  void* pUserData,
14620  VmaAllocation* pAllocation)
14621 {
14622  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14623  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14624  if(res < 0)
14625  {
14626  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14627  return res;
14628  }
14629 
14630  void* pMappedData = VMA_NULL;
14631  if(map)
14632  {
14633  res = (*m_VulkanFunctions.vkMapMemory)(
14634  m_hDevice,
14635  hMemory,
14636  0,
14637  VK_WHOLE_SIZE,
14638  0,
14639  &pMappedData);
14640  if(res < 0)
14641  {
14642  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14643  FreeVulkanMemory(memTypeIndex, size, hMemory);
14644  return res;
14645  }
14646  }
14647 
14648  *pAllocation = m_AllocationObjectAllocator.Allocate();
14649  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14650  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14651  (*pAllocation)->SetUserData(this, pUserData);
14652  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14653  {
14654  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14655  }
14656 
14657  return VK_SUCCESS;
14658 }
14659 
14660 void VmaAllocator_T::GetBufferMemoryRequirements(
14661  VkBuffer hBuffer,
14662  VkMemoryRequirements& memReq,
14663  bool& requiresDedicatedAllocation,
14664  bool& prefersDedicatedAllocation) const
14665 {
14666 #if VMA_DEDICATED_ALLOCATION
14667  if(m_UseKhrDedicatedAllocation)
14668  {
14669  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14670  memReqInfo.buffer = hBuffer;
14671 
14672  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14673 
14674  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14675  memReq2.pNext = &memDedicatedReq;
14676 
14677  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14678 
14679  memReq = memReq2.memoryRequirements;
14680  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14681  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14682  }
14683  else
14684 #endif // #if VMA_DEDICATED_ALLOCATION
14685  {
14686  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14687  requiresDedicatedAllocation = false;
14688  prefersDedicatedAllocation = false;
14689  }
14690 }
14691 
14692 void VmaAllocator_T::GetImageMemoryRequirements(
14693  VkImage hImage,
14694  VkMemoryRequirements& memReq,
14695  bool& requiresDedicatedAllocation,
14696  bool& prefersDedicatedAllocation) const
14697 {
14698 #if VMA_DEDICATED_ALLOCATION
14699  if(m_UseKhrDedicatedAllocation)
14700  {
14701  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14702  memReqInfo.image = hImage;
14703 
14704  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14705 
14706  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14707  memReq2.pNext = &memDedicatedReq;
14708 
14709  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14710 
14711  memReq = memReq2.memoryRequirements;
14712  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14713  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14714  }
14715  else
14716 #endif // #if VMA_DEDICATED_ALLOCATION
14717  {
14718  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14719  requiresDedicatedAllocation = false;
14720  prefersDedicatedAllocation = false;
14721  }
14722 }
14723 
14724 VkResult VmaAllocator_T::AllocateMemory(
14725  const VkMemoryRequirements& vkMemReq,
14726  bool requiresDedicatedAllocation,
14727  bool prefersDedicatedAllocation,
14728  VkBuffer dedicatedBuffer,
14729  VkImage dedicatedImage,
14730  const VmaAllocationCreateInfo& createInfo,
14731  VmaSuballocationType suballocType,
14732  size_t allocationCount,
14733  VmaAllocation* pAllocations)
14734 {
14735  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14736 
14737  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14738 
14739  if(vkMemReq.size == 0)
14740  {
14741  return VK_ERROR_VALIDATION_FAILED_EXT;
14742  }
14743  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14744  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14745  {
14746  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14747  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14748  }
14749  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14750  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14751  {
14752  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14753  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14754  }
14755  if(requiresDedicatedAllocation)
14756  {
14757  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14758  {
14759  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14760  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14761  }
14762  if(createInfo.pool != VK_NULL_HANDLE)
14763  {
14764  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14765  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14766  }
14767  }
14768  if((createInfo.pool != VK_NULL_HANDLE) &&
14769  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14770  {
14771  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14772  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14773  }
14774 
14775  if(createInfo.pool != VK_NULL_HANDLE)
14776  {
14777  const VkDeviceSize alignmentForPool = VMA_MAX(
14778  vkMemReq.alignment,
14779  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14780  return createInfo.pool->m_BlockVector.Allocate(
14781  m_CurrentFrameIndex.load(),
14782  vkMemReq.size,
14783  alignmentForPool,
14784  createInfo,
14785  suballocType,
14786  allocationCount,
14787  pAllocations);
14788  }
14789  else
14790  {
14791  // Bit mask of Vulkan memory types acceptable for this allocation.
14792  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14793  uint32_t memTypeIndex = UINT32_MAX;
14794  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14795  if(res == VK_SUCCESS)
14796  {
14797  VkDeviceSize alignmentForMemType = VMA_MAX(
14798  vkMemReq.alignment,
14799  GetMemoryTypeMinAlignment(memTypeIndex));
14800 
14801  res = AllocateMemoryOfType(
14802  vkMemReq.size,
14803  alignmentForMemType,
14804  requiresDedicatedAllocation || prefersDedicatedAllocation,
14805  dedicatedBuffer,
14806  dedicatedImage,
14807  createInfo,
14808  memTypeIndex,
14809  suballocType,
14810  allocationCount,
14811  pAllocations);
14812  // Succeeded on first try.
14813  if(res == VK_SUCCESS)
14814  {
14815  return res;
14816  }
14817  // Allocation from this memory type failed. Try other compatible memory types.
14818  else
14819  {
14820  for(;;)
14821  {
14822  // Remove old memTypeIndex from list of possibilities.
14823  memoryTypeBits &= ~(1u << memTypeIndex);
14824  // Find alternative memTypeIndex.
14825  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14826  if(res == VK_SUCCESS)
14827  {
14828  alignmentForMemType = VMA_MAX(
14829  vkMemReq.alignment,
14830  GetMemoryTypeMinAlignment(memTypeIndex));
14831 
14832  res = AllocateMemoryOfType(
14833  vkMemReq.size,
14834  alignmentForMemType,
14835  requiresDedicatedAllocation || prefersDedicatedAllocation,
14836  dedicatedBuffer,
14837  dedicatedImage,
14838  createInfo,
14839  memTypeIndex,
14840  suballocType,
14841  allocationCount,
14842  pAllocations);
14843  // Allocation from this alternative memory type succeeded.
14844  if(res == VK_SUCCESS)
14845  {
14846  return res;
14847  }
14848  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14849  }
14850  // No other matching memory type index could be found.
14851  else
14852  {
14853  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14854  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14855  }
14856  }
14857  }
14858  }
14859  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14860  else
14861  return res;
14862  }
14863 }
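// [Illustrative sketch, not part of the original source.] The fallback loop
// above can be observed directly: query the first candidate with
// vmaFindMemoryTypeIndex(), then mask it out the same way the loop does.
// `allocator` and `memoryTypeBits` are assumed to exist.
#if 0
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex = UINT32_MAX;
if(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS)
{
    // The next-best candidate, as tried after an allocation failure:
    const uint32_t remainingTypes = memoryTypeBits & ~(1u << memTypeIndex);
    uint32_t nextBest = UINT32_MAX;
    vmaFindMemoryTypeIndex(allocator, remainingTypes, &allocCreateInfo, &nextBest);
}
#endif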
14864 
14865 void VmaAllocator_T::FreeMemory(
14866  size_t allocationCount,
14867  const VmaAllocation* pAllocations)
14868 {
14869  VMA_ASSERT(pAllocations);
14870 
14871  for(size_t allocIndex = allocationCount; allocIndex--; )
14872  {
14873  VmaAllocation allocation = pAllocations[allocIndex];
14874 
14875  if(allocation != VK_NULL_HANDLE)
14876  {
14877  if(TouchAllocation(allocation))
14878  {
14879  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14880  {
14881  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14882  }
14883 
14884  switch(allocation->GetType())
14885  {
14886  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14887  {
14888  VmaBlockVector* pBlockVector = VMA_NULL;
14889  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14890  if(hPool != VK_NULL_HANDLE)
14891  {
14892  pBlockVector = &hPool->m_BlockVector;
14893  }
14894  else
14895  {
14896  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14897  pBlockVector = m_pBlockVectors[memTypeIndex];
14898  }
14899  pBlockVector->Free(allocation);
14900  }
14901  break;
14902  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14903  FreeDedicatedMemory(allocation);
14904  break;
14905  default:
14906  VMA_ASSERT(0);
14907  }
14908  }
14909 
14910  allocation->SetUserData(this, VMA_NULL);
14911  allocation->Dtor();
14912  m_AllocationObjectAllocator.Free(allocation);
14913  }
14914  }
14915 }
14916 
14917 VkResult VmaAllocator_T::ResizeAllocation(
14918  const VmaAllocation alloc,
14919  VkDeviceSize newSize)
14920 {
14921  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14922  {
14923  return VK_ERROR_VALIDATION_FAILED_EXT;
14924  }
14925  if(newSize == alloc->GetSize())
14926  {
14927  return VK_SUCCESS;
14928  }
14929 
14930  switch(alloc->GetType())
14931  {
14932  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14933  return VK_ERROR_FEATURE_NOT_PRESENT;
14934  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14935  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14936  {
14937  alloc->ChangeSize(newSize);
14938  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14939  return VK_SUCCESS;
14940  }
14941  else
14942  {
14943  return VK_ERROR_OUT_OF_POOL_MEMORY;
14944  }
14945  default:
14946  VMA_ASSERT(0);
14947  return VK_ERROR_VALIDATION_FAILED_EXT;
14948  }
14949 }
14950 
14951 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14952 {
14953  // Initialize.
14954  InitStatInfo(pStats->total);
14955  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14956  InitStatInfo(pStats->memoryType[i]);
14957  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14958  InitStatInfo(pStats->memoryHeap[i]);
14959 
14960  // Process default pools.
14961  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14962  {
14963  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14964  VMA_ASSERT(pBlockVector);
14965  pBlockVector->AddStats(pStats);
14966  }
14967 
14968  // Process custom pools.
14969  {
14970  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14971  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14972  {
14973  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14974  }
14975  }
14976 
14977  // Process dedicated allocations.
14978  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14979  {
14980  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14981  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14982  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14983  VMA_ASSERT(pDedicatedAllocVector);
14984  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14985  {
14986  VmaStatInfo allocationStatInfo;
14987  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14988  VmaAddStatInfo(pStats->total, allocationStatInfo);
14989  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14990  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14991  }
14992  }
14993 
14994  // Postprocess.
14995  VmaPostprocessCalcStatInfo(pStats->total);
14996  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14997  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14998  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14999  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15000 }
15001 
15002 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15003 
15004 VkResult VmaAllocator_T::DefragmentationBegin(
15005  const VmaDefragmentationInfo2& info,
15006  VmaDefragmentationStats* pStats,
15007  VmaDefragmentationContext* pContext)
15008 {
15009  if(info.pAllocationsChanged != VMA_NULL)
15010  {
15011  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15012  }
15013 
15014  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15015  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15016 
15017  (*pContext)->AddPools(info.poolCount, info.pPools);
15018  (*pContext)->AddAllocations(
15019  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15020 
15021  VkResult res = (*pContext)->Defragment(
15022  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15023  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15024  info.commandBuffer, pStats);
15025 
15026  if(res != VK_NOT_READY)
15027  {
15028  vma_delete(this, *pContext);
15029  *pContext = VMA_NULL;
15030  }
15031 
15032  return res;
15033 }
15034 
15035 VkResult VmaAllocator_T::DefragmentationEnd(
15036  VmaDefragmentationContext context)
15037 {
15038  vma_delete(this, context);
15039  return VK_SUCCESS;
15040 }
15041 
15042 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15043 {
15044  if(hAllocation->CanBecomeLost())
15045  {
15046  /*
15047  Warning: This is a carefully designed algorithm.
15048  Do not modify unless you really know what you're doing :)
15049  */
15050  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15051  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15052  for(;;)
15053  {
15054  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15055  {
15056  pAllocationInfo->memoryType = UINT32_MAX;
15057  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15058  pAllocationInfo->offset = 0;
15059  pAllocationInfo->size = hAllocation->GetSize();
15060  pAllocationInfo->pMappedData = VMA_NULL;
15061  pAllocationInfo->pUserData = hAllocation->GetUserData();
15062  return;
15063  }
15064  else if(localLastUseFrameIndex == localCurrFrameIndex)
15065  {
15066  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15067  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15068  pAllocationInfo->offset = hAllocation->GetOffset();
15069  pAllocationInfo->size = hAllocation->GetSize();
15070  pAllocationInfo->pMappedData = VMA_NULL;
15071  pAllocationInfo->pUserData = hAllocation->GetUserData();
15072  return;
15073  }
15074  else // Last use time earlier than current time.
15075  {
15076  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15077  {
15078  localLastUseFrameIndex = localCurrFrameIndex;
15079  }
15080  }
15081  }
15082  }
15083  else
15084  {
15085 #if VMA_STATS_STRING_ENABLED
15086  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15087  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15088  for(;;)
15089  {
15090  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15091  if(localLastUseFrameIndex == localCurrFrameIndex)
15092  {
15093  break;
15094  }
15095  else // Last use time earlier than current time.
15096  {
15097  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15098  {
15099  localLastUseFrameIndex = localCurrFrameIndex;
15100  }
15101  }
15102  }
15103 #endif
15104 
15105  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15106  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15107  pAllocationInfo->offset = hAllocation->GetOffset();
15108  pAllocationInfo->size = hAllocation->GetSize();
15109  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15110  pAllocationInfo->pUserData = hAllocation->GetUserData();
15111  }
15112 }
15113 
15114 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15115 {
15116  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15117  if(hAllocation->CanBecomeLost())
15118  {
15119  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15120  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15121  for(;;)
15122  {
15123  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15124  {
15125  return false;
15126  }
15127  else if(localLastUseFrameIndex == localCurrFrameIndex)
15128  {
15129  return true;
15130  }
15131  else // Last use time earlier than current time.
15132  {
15133  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15134  {
15135  localLastUseFrameIndex = localCurrFrameIndex;
15136  }
15137  }
15138  }
15139  }
15140  else
15141  {
15142 #if VMA_STATS_STRING_ENABLED
15143  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15144  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15145  for(;;)
15146  {
15147  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15148  if(localLastUseFrameIndex == localCurrFrameIndex)
15149  {
15150  break;
15151  }
15152  else // Last use time earlier than current time.
15153  {
15154  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15155  {
15156  localLastUseFrameIndex = localCurrFrameIndex;
15157  }
15158  }
15159  }
15160 #endif
15161 
15162  return true;
15163  }
15164 }
15165 
15166 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15167 {
15168  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15169 
15170  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15171 
15172  if(newCreateInfo.maxBlockCount == 0)
15173  {
15174  newCreateInfo.maxBlockCount = SIZE_MAX;
15175  }
15176  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15177  {
15178  return VK_ERROR_INITIALIZATION_FAILED;
15179  }
15180 
15181  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15182 
15183  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15184 
15185  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15186  if(res != VK_SUCCESS)
15187  {
15188  vma_delete(this, *pPool);
15189  *pPool = VMA_NULL;
15190  return res;
15191  }
15192 
15193  // Add to m_Pools.
15194  {
15195  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15196  (*pPool)->SetId(m_NextPoolId++);
15197  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15198  }
15199 
15200  return VK_SUCCESS;
15201 }
15202 
15203 void VmaAllocator_T::DestroyPool(VmaPool pool)
15204 {
15205  // Remove from m_Pools.
15206  {
15207  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15208  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15209  VMA_ASSERT(success && "Pool not found in Allocator.");
15210  }
15211 
15212  vma_delete(this, pool);
15213 }
15214 
15215 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15216 {
15217  pool->m_BlockVector.GetPoolStats(pPoolStats);
15218 }
15219 
15220 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15221 {
15222  m_CurrentFrameIndex.store(frameIndex);
15223 }
15224 
15225 void VmaAllocator_T::MakePoolAllocationsLost(
15226  VmaPool hPool,
15227  size_t* pLostAllocationCount)
15228 {
15229  hPool->m_BlockVector.MakePoolAllocationsLost(
15230  m_CurrentFrameIndex.load(),
15231  pLostAllocationCount);
15232 }
15233 
15234 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15235 {
15236  return hPool->m_BlockVector.CheckCorruption();
15237 }
15238 
15239 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15240 {
15241  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15242 
15243  // Process default pools.
15244  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15245  {
15246  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15247  {
15248  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15249  VMA_ASSERT(pBlockVector);
15250  VkResult localRes = pBlockVector->CheckCorruption();
15251  switch(localRes)
15252  {
15253  case VK_ERROR_FEATURE_NOT_PRESENT:
15254  break;
15255  case VK_SUCCESS:
15256  finalRes = VK_SUCCESS;
15257  break;
15258  default:
15259  return localRes;
15260  }
15261  }
15262  }
15263 
15264  // Process custom pools.
15265  {
15266  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15267  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15268  {
15269  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15270  {
15271  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15272  switch(localRes)
15273  {
15274  case VK_ERROR_FEATURE_NOT_PRESENT:
15275  break;
15276  case VK_SUCCESS:
15277  finalRes = VK_SUCCESS;
15278  break;
15279  default:
15280  return localRes;
15281  }
15282  }
15283  }
15284  }
15285 
15286  return finalRes;
15287 }
15288 
15289 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15290 {
15291  *pAllocation = m_AllocationObjectAllocator.Allocate();
15292  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15293  (*pAllocation)->InitLost();
15294 }
15295 
15296 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15297 {
15298  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15299 
15300  VkResult res;
15301  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15302  {
15303  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15304  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15305  {
15306  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15307  if(res == VK_SUCCESS)
15308  {
15309  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15310  }
15311  }
15312  else
15313  {
15314  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15315  }
15316  }
15317  else
15318  {
15319  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15320  }
15321 
15322  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15323  {
15324  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15325  }
15326 
15327  return res;
15328 }
15329 
15330 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15331 {
15332  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15333  {
15334  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15335  }
15336 
15337  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15338 
15339  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15340  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15341  {
15342  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15343  m_HeapSizeLimit[heapIndex] += size;
15344  }
15345 }
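
// Example (editor's illustrative sketch, not part of the original file): the
// m_HeapSizeLimit bookkeeping in AllocateVulkanMemory/FreeVulkanMemory above is
// driven by VmaAllocatorCreateInfo::pHeapSizeLimit. A hypothetical setup that
// caps heap 0; `physicalDevice` and `device` are assumed created elsewhere.
/*
VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
{
    heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit" for that heap.
}
heapLimits[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapLimits;
*/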
15346 
15347 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15348 {
15349  if(hAllocation->CanBecomeLost())
15350  {
15351  return VK_ERROR_MEMORY_MAP_FAILED;
15352  }
15353 
15354  switch(hAllocation->GetType())
15355  {
15356  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15357  {
15358  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15359  char *pBytes = VMA_NULL;
15360  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15361  if(res == VK_SUCCESS)
15362  {
15363  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15364  hAllocation->BlockAllocMap();
15365  }
15366  return res;
15367  }
15368  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15369  return hAllocation->DedicatedAllocMap(this, ppData);
15370  default:
15371  VMA_ASSERT(0);
15372  return VK_ERROR_MEMORY_MAP_FAILED;
15373  }
15374 }
15375 
15376 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15377 {
15378  switch(hAllocation->GetType())
15379  {
15380  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15381  {
15382  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15383  hAllocation->BlockAllocUnmap();
15384  pBlock->Unmap(this, 1);
15385  }
15386  break;
15387  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15388  hAllocation->DedicatedAllocUnmap(this);
15389  break;
15390  default:
15391  VMA_ASSERT(0);
15392  }
15393 }
15394 
15395 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15396 {
15397  VkResult res = VK_SUCCESS;
15398  switch(hAllocation->GetType())
15399  {
15400  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15401  res = GetVulkanFunctions().vkBindBufferMemory(
15402  m_hDevice,
15403  hBuffer,
15404  hAllocation->GetMemory(),
15405  0); //memoryOffset
15406  break;
15407  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15408  {
15409  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15410  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15411  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15412  break;
15413  }
15414  default:
15415  VMA_ASSERT(0);
15416  }
15417  return res;
15418 }
15419 
15420 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15421 {
15422  VkResult res = VK_SUCCESS;
15423  switch(hAllocation->GetType())
15424  {
15425  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15426  res = GetVulkanFunctions().vkBindImageMemory(
15427  m_hDevice,
15428  hImage,
15429  hAllocation->GetMemory(),
15430  0); //memoryOffset
15431  break;
15432  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15433  {
15434  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15435  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15436  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15437  break;
15438  }
15439  default:
15440  VMA_ASSERT(0);
15441  }
15442  return res;
15443 }
15444 
15445 void VmaAllocator_T::FlushOrInvalidateAllocation(
15446  VmaAllocation hAllocation,
15447  VkDeviceSize offset, VkDeviceSize size,
15448  VMA_CACHE_OPERATION op)
15449 {
15450  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15451  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15452  {
15453  const VkDeviceSize allocationSize = hAllocation->GetSize();
15454  VMA_ASSERT(offset <= allocationSize);
15455 
15456  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15457 
15458  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15459  memRange.memory = hAllocation->GetMemory();
15460 
15461  switch(hAllocation->GetType())
15462  {
15463  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15464  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15465  if(size == VK_WHOLE_SIZE)
15466  {
15467  memRange.size = allocationSize - memRange.offset;
15468  }
15469  else
15470  {
15471  VMA_ASSERT(offset + size <= allocationSize);
15472  memRange.size = VMA_MIN(
15473  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15474  allocationSize - memRange.offset);
15475  }
15476  break;
15477 
15478  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15479  {
15480  // 1. Still within this allocation.
15481  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15482  if(size == VK_WHOLE_SIZE)
15483  {
15484  size = allocationSize - offset;
15485  }
15486  else
15487  {
15488  VMA_ASSERT(offset + size <= allocationSize);
15489  }
15490  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15491 
15492  // 2. Adjust to whole block.
15493  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15494  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15495  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15496  memRange.offset += allocationOffset;
15497  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15498 
15499  break;
15500  }
15501 
15502  default:
15503  VMA_ASSERT(0);
15504  }
15505 
15506  switch(op)
15507  {
15508  case VMA_CACHE_FLUSH:
15509  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15510  break;
15511  case VMA_CACHE_INVALIDATE:
15512  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15513  break;
15514  default:
15515  VMA_ASSERT(0);
15516  }
15517  }
15518  // else: Just ignore this call.
15519 }
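
// Worked example (editor's note): with nonCoherentAtomSize = 64, offset = 100,
// size = 30, the dedicated-allocation path above computes:
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VmaAlignUp(30 + (100 - 64), 64) = VmaAlignUp(66, 64) = 128
// i.e. the flushed/invalidated range is widened on both sides to whole
// nonCoherentAtomSize units, then clamped to the allocation/block size.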
15520 
15521 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15522 {
15523  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15524 
15525  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15526  {
15527  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15528  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15529  VMA_ASSERT(pDedicatedAllocations);
15530  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15531  VMA_ASSERT(success);
15532  }
15533 
15534  VkDeviceMemory hMemory = allocation->GetMemory();
15535 
15536  /*
15537  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15538  before vkFreeMemory.
15539 
15540  if(allocation->GetMappedData() != VMA_NULL)
15541  {
15542  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15543  }
15544  */
15545 
15546  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15547 
15548  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15549 }
15550 
15551 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15552 {
15553  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15554  !hAllocation->CanBecomeLost() &&
15555  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15556  {
15557  void* pData = VMA_NULL;
15558  VkResult res = Map(hAllocation, &pData);
15559  if(res == VK_SUCCESS)
15560  {
15561  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15562  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15563  Unmap(hAllocation);
15564  }
15565  else
15566  {
15567  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15568  }
15569  }
15570 }
15571 
15572 #if VMA_STATS_STRING_ENABLED
15573 
15574 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15575 {
15576  bool dedicatedAllocationsStarted = false;
15577  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15578  {
15579  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15580  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15581  VMA_ASSERT(pDedicatedAllocVector);
15582  if(pDedicatedAllocVector->empty() == false)
15583  {
15584  if(dedicatedAllocationsStarted == false)
15585  {
15586  dedicatedAllocationsStarted = true;
15587  json.WriteString("DedicatedAllocations");
15588  json.BeginObject();
15589  }
15590 
15591  json.BeginString("Type ");
15592  json.ContinueString(memTypeIndex);
15593  json.EndString();
15594 
15595  json.BeginArray();
15596 
15597  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15598  {
15599  json.BeginObject(true);
15600  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15601  hAlloc->PrintParameters(json);
15602  json.EndObject();
15603  }
15604 
15605  json.EndArray();
15606  }
15607  }
15608  if(dedicatedAllocationsStarted)
15609  {
15610  json.EndObject();
15611  }
15612 
15613  {
15614  bool allocationsStarted = false;
15615  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15616  {
15617  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15618  {
15619  if(allocationsStarted == false)
15620  {
15621  allocationsStarted = true;
15622  json.WriteString("DefaultPools");
15623  json.BeginObject();
15624  }
15625 
15626  json.BeginString("Type ");
15627  json.ContinueString(memTypeIndex);
15628  json.EndString();
15629 
15630  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15631  }
15632  }
15633  if(allocationsStarted)
15634  {
15635  json.EndObject();
15636  }
15637  }
15638 
15639  // Custom pools
15640  {
15641  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15642  const size_t poolCount = m_Pools.size();
15643  if(poolCount > 0)
15644  {
15645  json.WriteString("Pools");
15646  json.BeginObject();
15647  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15648  {
15649  json.BeginString();
15650  json.ContinueString(m_Pools[poolIndex]->GetId());
15651  json.EndString();
15652 
15653  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15654  }
15655  json.EndObject();
15656  }
15657  }
15658 }
15659 
15660 #endif // #if VMA_STATS_STRING_ENABLED
15661 
15662 ////////////////////////////////////////////////////////////////////////////////
15663 // Public interface
15664 
15665 VkResult vmaCreateAllocator(
15666  const VmaAllocatorCreateInfo* pCreateInfo,
15667  VmaAllocator* pAllocator)
15668 {
15669  VMA_ASSERT(pCreateInfo && pAllocator);
15670  VMA_DEBUG_LOG("vmaCreateAllocator");
15671  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15672  return (*pAllocator)->Init(pCreateInfo);
15673 }
15674 
15675 void vmaDestroyAllocator(
15676  VmaAllocator allocator)
15677 {
15678  if(allocator != VK_NULL_HANDLE)
15679  {
15680  VMA_DEBUG_LOG("vmaDestroyAllocator");
15681  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15682  vma_delete(&allocationCallbacks, allocator);
15683  }
15684 }
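
// Example (editor's illustrative sketch, not part of the original file):
// minimal creation and destruction of an allocator. Assumes `physicalDevice`
// and `device` were created by the application.
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
{
    // ... create buffers/images through the allocator ...
    vmaDestroyAllocator(allocator); // Accepts VK_NULL_HANDLE as a no-op, as seen above.
}
*/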
15685 
15686 void vmaGetPhysicalDeviceProperties(
15687  VmaAllocator allocator,
15688  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15689 {
15690  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15691  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15692 }
15693 
15694 void vmaGetMemoryProperties(
15695  VmaAllocator allocator,
15696  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15697 {
15698  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15699  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15700 }
15701 
15702 void vmaGetMemoryTypeProperties(
15703  VmaAllocator allocator,
15704  uint32_t memoryTypeIndex,
15705  VkMemoryPropertyFlags* pFlags)
15706 {
15707  VMA_ASSERT(allocator && pFlags);
15708  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15709  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15710 }
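
// Example (editor's illustrative sketch): checking whether an allocation ended
// up in host-visible memory. Assumes `allocInfo` was filled by
// vmaGetAllocationInfo() (defined further below).
/*
VkMemoryPropertyFlags memFlags = 0;
vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
{
    // The allocation is mappable with vmaMapMemory().
}
*/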
15711 
15712 void vmaSetCurrentFrameIndex(
15713  VmaAllocator allocator,
15714  uint32_t frameIndex)
15715 {
15716  VMA_ASSERT(allocator);
15717  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15718 
15719  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15720 
15721  allocator->SetCurrentFrameIndex(frameIndex);
15722 }
15723 
15724 void vmaCalculateStats(
15725  VmaAllocator allocator,
15726  VmaStats* pStats)
15727 {
15728  VMA_ASSERT(allocator && pStats);
15729  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15730  allocator->CalculateStats(pStats);
15731 }
15732 
15733 #if VMA_STATS_STRING_ENABLED
15734 
15735 void vmaBuildStatsString(
15736  VmaAllocator allocator,
15737  char** ppStatsString,
15738  VkBool32 detailedMap)
15739 {
15740  VMA_ASSERT(allocator && ppStatsString);
15741  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15742 
15743  VmaStringBuilder sb(allocator);
15744  {
15745  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15746  json.BeginObject();
15747 
15748  VmaStats stats;
15749  allocator->CalculateStats(&stats);
15750 
15751  json.WriteString("Total");
15752  VmaPrintStatInfo(json, stats.total);
15753 
15754  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15755  {
15756  json.BeginString("Heap ");
15757  json.ContinueString(heapIndex);
15758  json.EndString();
15759  json.BeginObject();
15760 
15761  json.WriteString("Size");
15762  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15763 
15764  json.WriteString("Flags");
15765  json.BeginArray(true);
15766  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15767  {
15768  json.WriteString("DEVICE_LOCAL");
15769  }
15770  json.EndArray();
15771 
15772  if(stats.memoryHeap[heapIndex].blockCount > 0)
15773  {
15774  json.WriteString("Stats");
15775  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15776  }
15777 
15778  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15779  {
15780  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15781  {
15782  json.BeginString("Type ");
15783  json.ContinueString(typeIndex);
15784  json.EndString();
15785 
15786  json.BeginObject();
15787 
15788  json.WriteString("Flags");
15789  json.BeginArray(true);
15790  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15791  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15792  {
15793  json.WriteString("DEVICE_LOCAL");
15794  }
15795  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15796  {
15797  json.WriteString("HOST_VISIBLE");
15798  }
15799  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15800  {
15801  json.WriteString("HOST_COHERENT");
15802  }
15803  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15804  {
15805  json.WriteString("HOST_CACHED");
15806  }
15807  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15808  {
15809  json.WriteString("LAZILY_ALLOCATED");
15810  }
15811  json.EndArray();
15812 
15813  if(stats.memoryType[typeIndex].blockCount > 0)
15814  {
15815  json.WriteString("Stats");
15816  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15817  }
15818 
15819  json.EndObject();
15820  }
15821  }
15822 
15823  json.EndObject();
15824  }
15825  if(detailedMap == VK_TRUE)
15826  {
15827  allocator->PrintDetailedMap(json);
15828  }
15829 
15830  json.EndObject();
15831  }
15832 
15833  const size_t len = sb.GetLength();
15834  char* const pChars = vma_new_array(allocator, char, len + 1);
15835  if(len > 0)
15836  {
15837  memcpy(pChars, sb.GetData(), len);
15838  }
15839  pChars[len] = '\0';
15840  *ppStatsString = pChars;
15841 }
15842 
15843 void vmaFreeStatsString(
15844  VmaAllocator allocator,
15845  char* pStatsString)
15846 {
15847  if(pStatsString != VMA_NULL)
15848  {
15849  VMA_ASSERT(allocator);
15850  size_t len = strlen(pStatsString);
15851  vma_delete_array(allocator, pStatsString, len + 1);
15852  }
15853 }
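
// Example (editor's illustrative sketch): dumping allocator statistics as JSON.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
// ... write statsString to a log or file for offline inspection ...
vmaFreeStatsString(allocator, statsString);
*/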
15854 
15855 #endif // #if VMA_STATS_STRING_ENABLED
15856 
15857 /*
15858 This function is not protected by any mutex because it just reads immutable data.
15859 */
15860 VkResult vmaFindMemoryTypeIndex(
15861  VmaAllocator allocator,
15862  uint32_t memoryTypeBits,
15863  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15864  uint32_t* pMemoryTypeIndex)
15865 {
15866  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15867  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15868  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15869 
15870  if(pAllocationCreateInfo->memoryTypeBits != 0)
15871  {
15872  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15873  }
15874 
15875  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15876  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15877 
15878  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15879  if(mapped)
15880  {
15881  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15882  }
15883 
15884  // Convert usage to requiredFlags and preferredFlags.
15885  switch(pAllocationCreateInfo->usage)
15886  {
15887  case VMA_MEMORY_USAGE_UNKNOWN:
15888  break;
15889  case VMA_MEMORY_USAGE_GPU_ONLY:
15890  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15891  {
15892  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15893  }
15894  break;
15895  case VMA_MEMORY_USAGE_CPU_ONLY:
15896  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15897  break;
15898  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15899  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15900  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15901  {
15902  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15903  }
15904  break;
15905  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15906  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15907  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15908  break;
15909  default:
15910  break;
15911  }
15912 
15913  *pMemoryTypeIndex = UINT32_MAX;
15914  uint32_t minCost = UINT32_MAX;
15915  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15916  memTypeIndex < allocator->GetMemoryTypeCount();
15917  ++memTypeIndex, memTypeBit <<= 1)
15918  {
15919  // This memory type is acceptable according to memoryTypeBits bitmask.
15920  if((memTypeBit & memoryTypeBits) != 0)
15921  {
15922  const VkMemoryPropertyFlags currFlags =
15923  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15924  // This memory type contains requiredFlags.
15925  if((requiredFlags & ~currFlags) == 0)
15926  {
15927  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15928  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15929  // Remember memory type with lowest cost.
15930  if(currCost < minCost)
15931  {
15932  *pMemoryTypeIndex = memTypeIndex;
15933  if(currCost == 0)
15934  {
15935  return VK_SUCCESS;
15936  }
15937  minCost = currCost;
15938  }
15939  }
15940  }
15941  }
15942  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15943 }
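
// Example (editor's illustrative sketch): picking a memory type for a staging
// resource. Passing UINT32_MAX as memoryTypeBits accepts any memory type.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
// VK_ERROR_FEATURE_NOT_PRESENT here means no memory type satisfies requiredFlags.
*/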
15944 
15945 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15946  VmaAllocator allocator,
15947  const VkBufferCreateInfo* pBufferCreateInfo,
15948  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15949  uint32_t* pMemoryTypeIndex)
15950 {
15951  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15952  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15953  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15954  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15955 
15956  const VkDevice hDev = allocator->m_hDevice;
15957  VkBuffer hBuffer = VK_NULL_HANDLE;
15958  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15959  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15960  if(res == VK_SUCCESS)
15961  {
15962  VkMemoryRequirements memReq = {};
15963  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15964  hDev, hBuffer, &memReq);
15965 
15966  res = vmaFindMemoryTypeIndex(
15967  allocator,
15968  memReq.memoryTypeBits,
15969  pAllocationCreateInfo,
15970  pMemoryTypeIndex);
15971 
15972  allocator->GetVulkanFunctions().vkDestroyBuffer(
15973  hDev, hBuffer, allocator->GetAllocationCallbacks());
15974  }
15975  return res;
15976 }
15977 
15978 VkResult vmaFindMemoryTypeIndexForImageInfo(
15979  VmaAllocator allocator,
15980  const VkImageCreateInfo* pImageCreateInfo,
15981  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15982  uint32_t* pMemoryTypeIndex)
15983 {
15984  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15985  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15986  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15987  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15988 
15989  const VkDevice hDev = allocator->m_hDevice;
15990  VkImage hImage = VK_NULL_HANDLE;
15991  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15992  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15993  if(res == VK_SUCCESS)
15994  {
15995  VkMemoryRequirements memReq = {};
15996  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15997  hDev, hImage, &memReq);
15998 
15999  res = vmaFindMemoryTypeIndex(
16000  allocator,
16001  memReq.memoryTypeBits,
16002  pAllocationCreateInfo,
16003  pMemoryTypeIndex);
16004 
16005  allocator->GetVulkanFunctions().vkDestroyImage(
16006  hDev, hImage, allocator->GetAllocationCallbacks());
16007  }
16008  return res;
16009 }
16010 
16011 VkResult vmaCreatePool(
16012  VmaAllocator allocator,
16013  const VmaPoolCreateInfo* pCreateInfo,
16014  VmaPool* pPool)
16015 {
16016  VMA_ASSERT(allocator && pCreateInfo && pPool);
16017 
16018  VMA_DEBUG_LOG("vmaCreatePool");
16019 
16020  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16021 
16022  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16023 
16024 #if VMA_RECORDING_ENABLED
16025  if(allocator->GetRecorder() != VMA_NULL)
16026  {
16027  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16028  }
16029 #endif
16030 
16031  return res;
16032 }
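
// Example (editor's illustrative sketch): creating a custom pool for a memory
// type found earlier, e.g. with vmaFindMemoryTypeIndex().
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.
poolCreateInfo.maxBlockCount = 4; // 0 would mean unlimited (SIZE_MAX, see CreatePool above).

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Allocate from it by setting VmaAllocationCreateInfo::pool, then:
// vmaDestroyPool(allocator, pool);
*/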
16033 
16034 void vmaDestroyPool(
16035  VmaAllocator allocator,
16036  VmaPool pool)
16037 {
16038  VMA_ASSERT(allocator);
16039 
16040  if(pool == VK_NULL_HANDLE)
16041  {
16042  return;
16043  }
16044 
16045  VMA_DEBUG_LOG("vmaDestroyPool");
16046 
16047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16048 
16049 #if VMA_RECORDING_ENABLED
16050  if(allocator->GetRecorder() != VMA_NULL)
16051  {
16052  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16053  }
16054 #endif
16055 
16056  allocator->DestroyPool(pool);
16057 }
16058 
16059 void vmaGetPoolStats(
16060  VmaAllocator allocator,
16061  VmaPool pool,
16062  VmaPoolStats* pPoolStats)
16063 {
16064  VMA_ASSERT(allocator && pool && pPoolStats);
16065 
16066  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16067 
16068  allocator->GetPoolStats(pool, pPoolStats);
16069 }
16070 
16071 void vmaMakePoolAllocationsLost(
16072  VmaAllocator allocator,
16073  VmaPool pool,
16074  size_t* pLostAllocationCount)
16075 {
16076  VMA_ASSERT(allocator && pool);
16077 
16078  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16079 
16080 #if VMA_RECORDING_ENABLED
16081  if(allocator->GetRecorder() != VMA_NULL)
16082  {
16083  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16084  }
16085 #endif
16086 
16087  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16088 }
16089 
16090 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16091 {
16092  VMA_ASSERT(allocator && pool);
16093 
16094  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16095 
16096  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16097 
16098  return allocator->CheckPoolCorruption(pool);
16099 }
16100 
16101 VkResult vmaAllocateMemory(
16102  VmaAllocator allocator,
16103  const VkMemoryRequirements* pVkMemoryRequirements,
16104  const VmaAllocationCreateInfo* pCreateInfo,
16105  VmaAllocation* pAllocation,
16106  VmaAllocationInfo* pAllocationInfo)
16107 {
16108  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16109 
16110  VMA_DEBUG_LOG("vmaAllocateMemory");
16111 
16112  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16113 
16114  VkResult result = allocator->AllocateMemory(
16115  *pVkMemoryRequirements,
16116  false, // requiresDedicatedAllocation
16117  false, // prefersDedicatedAllocation
16118  VK_NULL_HANDLE, // dedicatedBuffer
16119  VK_NULL_HANDLE, // dedicatedImage
16120  *pCreateInfo,
16121  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16122  1, // allocationCount
16123  pAllocation);
16124 
16125 #if VMA_RECORDING_ENABLED
16126  if(allocator->GetRecorder() != VMA_NULL)
16127  {
16128  allocator->GetRecorder()->RecordAllocateMemory(
16129  allocator->GetCurrentFrameIndex(),
16130  *pVkMemoryRequirements,
16131  *pCreateInfo,
16132  *pAllocation);
16133  }
16134 #endif
16135 
16136  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16137  {
16138  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16139  }
16140 
16141  return result;
16142 }
16143 
16144 VkResult vmaAllocateMemoryPages(
16145  VmaAllocator allocator,
16146  const VkMemoryRequirements* pVkMemoryRequirements,
16147  const VmaAllocationCreateInfo* pCreateInfo,
16148  size_t allocationCount,
16149  VmaAllocation* pAllocations,
16150  VmaAllocationInfo* pAllocationInfo)
16151 {
16152  if(allocationCount == 0)
16153  {
16154  return VK_SUCCESS;
16155  }
16156 
16157  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16158 
16159  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16160 
16161  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16162 
16163  VkResult result = allocator->AllocateMemory(
16164  *pVkMemoryRequirements,
16165  false, // requiresDedicatedAllocation
16166  false, // prefersDedicatedAllocation
16167  VK_NULL_HANDLE, // dedicatedBuffer
16168  VK_NULL_HANDLE, // dedicatedImage
16169  *pCreateInfo,
16170  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16171  allocationCount,
16172  pAllocations);
16173 
16174 #if VMA_RECORDING_ENABLED
16175  if(allocator->GetRecorder() != VMA_NULL)
16176  {
16177  allocator->GetRecorder()->RecordAllocateMemoryPages(
16178  allocator->GetCurrentFrameIndex(),
16179  *pVkMemoryRequirements,
16180  *pCreateInfo,
16181  (uint64_t)allocationCount,
16182  pAllocations);
16183  }
16184 #endif
16185 
16186  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16187  {
16188  for(size_t i = 0; i < allocationCount; ++i)
16189  {
16190  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16191  }
16192  }
16193 
16194  return result;
16195 }
16196 
16197 VkResult vmaAllocateMemoryForBuffer(
16198  VmaAllocator allocator,
16199  VkBuffer buffer,
16200  const VmaAllocationCreateInfo* pCreateInfo,
16201  VmaAllocation* pAllocation,
16202  VmaAllocationInfo* pAllocationInfo)
16203 {
16204  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16205 
16206  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16207 
16208  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16209 
16210  VkMemoryRequirements vkMemReq = {};
16211  bool requiresDedicatedAllocation = false;
16212  bool prefersDedicatedAllocation = false;
16213  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16214  requiresDedicatedAllocation,
16215  prefersDedicatedAllocation);
16216 
16217  VkResult result = allocator->AllocateMemory(
16218  vkMemReq,
16219  requiresDedicatedAllocation,
16220  prefersDedicatedAllocation,
16221  buffer, // dedicatedBuffer
16222  VK_NULL_HANDLE, // dedicatedImage
16223  *pCreateInfo,
16224  VMA_SUBALLOCATION_TYPE_BUFFER,
16225  1, // allocationCount
16226  pAllocation);
16227 
16228 #if VMA_RECORDING_ENABLED
16229  if(allocator->GetRecorder() != VMA_NULL)
16230  {
16231  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16232  allocator->GetCurrentFrameIndex(),
16233  vkMemReq,
16234  requiresDedicatedAllocation,
16235  prefersDedicatedAllocation,
16236  *pCreateInfo,
16237  *pAllocation);
16238  }
16239 #endif
16240 
16241  if(pAllocationInfo && result == VK_SUCCESS)
16242  {
16243  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16244  }
16245 
16246  return result;
16247 }
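
// Example (editor's illustrative sketch): two-step allocation and binding for
// a buffer created directly with vkCreateBuffer. vmaCreateBuffer() below wraps
// these steps into a single call.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, VMA_NULL);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, allocation, buffer);
}
*/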
16248 
16249 VkResult vmaAllocateMemoryForImage(
16250  VmaAllocator allocator,
16251  VkImage image,
16252  const VmaAllocationCreateInfo* pCreateInfo,
16253  VmaAllocation* pAllocation,
16254  VmaAllocationInfo* pAllocationInfo)
16255 {
16256  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16257 
16258  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16259 
16260  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16261 
16262  VkMemoryRequirements vkMemReq = {};
16263  bool requiresDedicatedAllocation = false;
16264  bool prefersDedicatedAllocation = false;
16265  allocator->GetImageMemoryRequirements(image, vkMemReq,
16266  requiresDedicatedAllocation, prefersDedicatedAllocation);
16267 
16268  VkResult result = allocator->AllocateMemory(
16269  vkMemReq,
16270  requiresDedicatedAllocation,
16271  prefersDedicatedAllocation,
16272  VK_NULL_HANDLE, // dedicatedBuffer
16273  image, // dedicatedImage
16274  *pCreateInfo,
16275  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16276  1, // allocationCount
16277  pAllocation);
16278 
16279 #if VMA_RECORDING_ENABLED
16280  if(allocator->GetRecorder() != VMA_NULL)
16281  {
16282  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16283  allocator->GetCurrentFrameIndex(),
16284  vkMemReq,
16285  requiresDedicatedAllocation,
16286  prefersDedicatedAllocation,
16287  *pCreateInfo,
16288  *pAllocation);
16289  }
16290 #endif
16291 
16292  if(pAllocationInfo && result == VK_SUCCESS)
16293  {
16294  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16295  }
16296 
16297  return result;
16298 }
16299 
16300 void vmaFreeMemory(
16301  VmaAllocator allocator,
16302  VmaAllocation allocation)
16303 {
16304  VMA_ASSERT(allocator);
16305 
16306  if(allocation == VK_NULL_HANDLE)
16307  {
16308  return;
16309  }
16310 
16311  VMA_DEBUG_LOG("vmaFreeMemory");
16312 
16313  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16314 
16315 #if VMA_RECORDING_ENABLED
16316  if(allocator->GetRecorder() != VMA_NULL)
16317  {
16318  allocator->GetRecorder()->RecordFreeMemory(
16319  allocator->GetCurrentFrameIndex(),
16320  allocation);
16321  }
16322 #endif
16323 
16324  allocator->FreeMemory(
16325  1, // allocationCount
16326  &allocation);
16327 }
16328 
16329 void vmaFreeMemoryPages(
16330  VmaAllocator allocator,
16331  size_t allocationCount,
16332  VmaAllocation* pAllocations)
16333 {
16334  if(allocationCount == 0)
16335  {
16336  return;
16337  }
16338 
16339  VMA_ASSERT(allocator);
16340 
16341  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16342 
16343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16344 
16345 #if VMA_RECORDING_ENABLED
16346  if(allocator->GetRecorder() != VMA_NULL)
16347  {
16348  allocator->GetRecorder()->RecordFreeMemoryPages(
16349  allocator->GetCurrentFrameIndex(),
16350  (uint64_t)allocationCount,
16351  pAllocations);
16352  }
16353 #endif
16354 
16355  allocator->FreeMemory(allocationCount, pAllocations);
16356 }
16357 
16358 VkResult vmaResizeAllocation(
16359  VmaAllocator allocator,
16360  VmaAllocation allocation,
16361  VkDeviceSize newSize)
16362 {
16363  VMA_ASSERT(allocator && allocation);
16364 
16365  VMA_DEBUG_LOG("vmaResizeAllocation");
16366 
16367  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16368 
16369 #if VMA_RECORDING_ENABLED
16370  if(allocator->GetRecorder() != VMA_NULL)
16371  {
16372  allocator->GetRecorder()->RecordResizeAllocation(
16373  allocator->GetCurrentFrameIndex(),
16374  allocation,
16375  newSize);
16376  }
16377 #endif
16378 
16379  return allocator->ResizeAllocation(allocation, newSize);
16380 }
16381 
16382 void vmaGetAllocationInfo(
16383  VmaAllocator allocator,
16384  VmaAllocation allocation,
16385  VmaAllocationInfo* pAllocationInfo)
16386 {
16387  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16388 
16389  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16390 
16391 #if VMA_RECORDING_ENABLED
16392  if(allocator->GetRecorder() != VMA_NULL)
16393  {
16394  allocator->GetRecorder()->RecordGetAllocationInfo(
16395  allocator->GetCurrentFrameIndex(),
16396  allocation);
16397  }
16398 #endif
16399 
16400  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16401 }
16402 
16403 VkBool32 vmaTouchAllocation(
16404  VmaAllocator allocator,
16405  VmaAllocation allocation)
16406 {
16407  VMA_ASSERT(allocator && allocation);
16408 
16409  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16410 
16411 #if VMA_RECORDING_ENABLED
16412  if(allocator->GetRecorder() != VMA_NULL)
16413  {
16414  allocator->GetRecorder()->RecordTouchAllocation(
16415  allocator->GetCurrentFrameIndex(),
16416  allocation);
16417  }
16418 #endif
16419 
16420  return allocator->TouchAllocation(allocation);
16421 }
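
// Example (editor's illustrative sketch): the lost-allocation protocol. Assumes
// the allocation was created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
// `frameIndex` is the application's frame counter.
/*
vmaSetCurrentFrameIndex(allocator, frameIndex); // Once per frame.

if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // The allocation was lost - free it and recreate the resource.
}
*/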
16422 
16423 void vmaSetAllocationUserData(
16424  VmaAllocator allocator,
16425  VmaAllocation allocation,
16426  void* pUserData)
16427 {
16428  VMA_ASSERT(allocator && allocation);
16429 
16430  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16431 
16432  allocation->SetUserData(allocator, pUserData);
16433 
16434 #if VMA_RECORDING_ENABLED
16435  if(allocator->GetRecorder() != VMA_NULL)
16436  {
16437  allocator->GetRecorder()->RecordSetAllocationUserData(
16438  allocator->GetCurrentFrameIndex(),
16439  allocation,
16440  pUserData);
16441  }
16442 #endif
16443 }
16444 
16445 void vmaCreateLostAllocation(
16446  VmaAllocator allocator,
16447  VmaAllocation* pAllocation)
16448 {
16449  VMA_ASSERT(allocator && pAllocation);
16450 
16451  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16452 
16453  allocator->CreateLostAllocation(pAllocation);
16454 
16455 #if VMA_RECORDING_ENABLED
16456  if(allocator->GetRecorder() != VMA_NULL)
16457  {
16458  allocator->GetRecorder()->RecordCreateLostAllocation(
16459  allocator->GetCurrentFrameIndex(),
16460  *pAllocation);
16461  }
16462 #endif
16463 }
16464 
16465 VkResult vmaMapMemory(
16466  VmaAllocator allocator,
16467  VmaAllocation allocation,
16468  void** ppData)
16469 {
16470  VMA_ASSERT(allocator && allocation && ppData);
16471 
16472  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16473 
16474  VkResult res = allocator->Map(allocation, ppData);
16475 
16476 #if VMA_RECORDING_ENABLED
16477  if(allocator->GetRecorder() != VMA_NULL)
16478  {
16479  allocator->GetRecorder()->RecordMapMemory(
16480  allocator->GetCurrentFrameIndex(),
16481  allocation);
16482  }
16483 #endif
16484 
16485  return res;
16486 }
16487 
16488 void vmaUnmapMemory(
16489  VmaAllocator allocator,
16490  VmaAllocation allocation)
16491 {
16492  VMA_ASSERT(allocator && allocation);
16493 
16494  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16495 
16496 #if VMA_RECORDING_ENABLED
16497  if(allocator->GetRecorder() != VMA_NULL)
16498  {
16499  allocator->GetRecorder()->RecordUnmapMemory(
16500  allocator->GetCurrentFrameIndex(),
16501  allocation);
16502  }
16503 #endif
16504 
16505  allocator->Unmap(allocation);
16506 }
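
// Example (editor's illustrative sketch): uploading data through a map/unmap
// pair. Assumes `srcData`/`srcSize` describe host data to copy.
/*
void* mapped = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, (size_t)srcSize);
    // Required for non-HOST_COHERENT memory; a harmless no-op otherwise
    // (see vmaFlushAllocation just below).
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}
*/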
16507 
16508 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16509 {
16510  VMA_ASSERT(allocator && allocation);
16511 
16512  VMA_DEBUG_LOG("vmaFlushAllocation");
16513 
16514  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16515 
16516  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16517 
16518 #if VMA_RECORDING_ENABLED
16519  if(allocator->GetRecorder() != VMA_NULL)
16520  {
16521  allocator->GetRecorder()->RecordFlushAllocation(
16522  allocator->GetCurrentFrameIndex(),
16523  allocation, offset, size);
16524  }
16525 #endif
16526 }
16527 
16528 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16529 {
16530  VMA_ASSERT(allocator && allocation);
16531 
16532  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16533 
16534  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16535 
16536  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16537 
16538 #if VMA_RECORDING_ENABLED
16539  if(allocator->GetRecorder() != VMA_NULL)
16540  {
16541  allocator->GetRecorder()->RecordInvalidateAllocation(
16542  allocator->GetCurrentFrameIndex(),
16543  allocation, offset, size);
16544  }
16545 #endif
16546 }
16547 
16548 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16549 {
16550  VMA_ASSERT(allocator);
16551 
16552  VMA_DEBUG_LOG("vmaCheckCorruption");
16553 
16554  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16555 
16556  return allocator->CheckCorruption(memoryTypeBits);
16557 }
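
// Example (editor's illustrative sketch): validating all memory types at once.
// Margins are only actually checked when the library was compiled with
// VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled.
/*
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
// VK_ERROR_FEATURE_NOT_PRESENT: no memory type could be validated;
// other error codes indicate detected corruption.
*/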
16558 
16559 VkResult vmaDefragment(
16560  VmaAllocator allocator,
16561  VmaAllocation* pAllocations,
16562  size_t allocationCount,
16563  VkBool32* pAllocationsChanged,
16564  const VmaDefragmentationInfo *pDefragmentationInfo,
16565  VmaDefragmentationStats* pDefragmentationStats)
16566 {
16567  // Deprecated interface, reimplemented using new one.
16568 
16569  VmaDefragmentationInfo2 info2 = {};
16570  info2.allocationCount = (uint32_t)allocationCount;
16571  info2.pAllocations = pAllocations;
16572  info2.pAllocationsChanged = pAllocationsChanged;
16573  if(pDefragmentationInfo != VMA_NULL)
16574  {
16575  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16576  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16577  }
16578  else
16579  {
16580  info2.maxCpuAllocationsToMove = UINT32_MAX;
16581  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16582  }
16583  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16584 
16585  VmaDefragmentationContext ctx;
16586  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16587  if(res == VK_NOT_READY)
16588  {
16589  res = vmaDefragmentationEnd(allocator, ctx);
16590  }
16591  return res;
16592 }
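
// Example (editor's illustrative sketch): the non-deprecated path the wrapper
// above forwards to. Assumes `allocations`/`allocCount` name the allocations
// to consider for defragmentation.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
if(res == VK_NOT_READY)
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
// Resources bound to moved allocations must be destroyed, recreated, and re-bound.
*/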
16593 
16594 VkResult vmaDefragmentationBegin(
16595  VmaAllocator allocator,
16596  const VmaDefragmentationInfo2* pInfo,
16597  VmaDefragmentationStats* pStats,
16598  VmaDefragmentationContext *pContext)
16599 {
16600  VMA_ASSERT(allocator && pInfo && pContext);
16601 
16602  // Degenerate case: Nothing to defragment.
16603  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16604  {
16605  return VK_SUCCESS;
16606  }
16607 
16608  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16609  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16610  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16611  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16612 
16613  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16614 
16615  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16616 
16617  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16618 
16619 #if VMA_RECORDING_ENABLED
16620  if(allocator->GetRecorder() != VMA_NULL)
16621  {
16622  allocator->GetRecorder()->RecordDefragmentationBegin(
16623  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16624  }
16625 #endif
16626 
16627  return res;
16628 }
16629 
16630 VkResult vmaDefragmentationEnd(
16631  VmaAllocator allocator,
16632  VmaDefragmentationContext context)
16633 {
16634  VMA_ASSERT(allocator);
16635 
16636  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16637 
16638  if(context != VK_NULL_HANDLE)
16639  {
16640  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16641 
16642 #if VMA_RECORDING_ENABLED
16643  if(allocator->GetRecorder() != VMA_NULL)
16644  {
16645  allocator->GetRecorder()->RecordDefragmentationEnd(
16646  allocator->GetCurrentFrameIndex(), context);
16647  }
16648 #endif
16649 
16650  return allocator->DefragmentationEnd(context);
16651  }
16652  else
16653  {
16654  return VK_SUCCESS;
16655  }
16656 }
16657 
16658 VkResult vmaBindBufferMemory(
16659  VmaAllocator allocator,
16660  VmaAllocation allocation,
16661  VkBuffer buffer)
16662 {
16663  VMA_ASSERT(allocator && allocation && buffer);
16664 
16665  VMA_DEBUG_LOG("vmaBindBufferMemory");
16666 
16667  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16668 
16669  return allocator->BindBufferMemory(allocation, buffer);
16670 }
16671 
16672 VkResult vmaBindImageMemory(
16673  VmaAllocator allocator,
16674  VmaAllocation allocation,
16675  VkImage image)
16676 {
16677  VMA_ASSERT(allocator && allocation && image);
16678 
16679  VMA_DEBUG_LOG("vmaBindImageMemory");
16680 
16681  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16682 
16683  return allocator->BindImageMemory(allocation, image);
16684 }
16685 
16686 VkResult vmaCreateBuffer(
16687  VmaAllocator allocator,
16688  const VkBufferCreateInfo* pBufferCreateInfo,
16689  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16690  VkBuffer* pBuffer,
16691  VmaAllocation* pAllocation,
16692  VmaAllocationInfo* pAllocationInfo)
16693 {
16694  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16695 
16696  if(pBufferCreateInfo->size == 0)
16697  {
16698  return VK_ERROR_VALIDATION_FAILED_EXT;
16699  }
16700 
16701  VMA_DEBUG_LOG("vmaCreateBuffer");
16702 
16703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16704 
16705  *pBuffer = VK_NULL_HANDLE;
16706  *pAllocation = VK_NULL_HANDLE;
16707 
16708  // 1. Create VkBuffer.
16709  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16710  allocator->m_hDevice,
16711  pBufferCreateInfo,
16712  allocator->GetAllocationCallbacks(),
16713  pBuffer);
16714  if(res >= 0)
16715  {
16716  // 2. vkGetBufferMemoryRequirements.
16717  VkMemoryRequirements vkMemReq = {};
16718  bool requiresDedicatedAllocation = false;
16719  bool prefersDedicatedAllocation = false;
16720  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16721  requiresDedicatedAllocation, prefersDedicatedAllocation);
16722 
16723  // Make sure alignment requirements for specific buffer usages reported
16724  // in Physical Device Properties are included in alignment reported by memory requirements.
16725  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16726  {
16727  VMA_ASSERT(vkMemReq.alignment %
16728  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16729  }
16730  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16731  {
16732  VMA_ASSERT(vkMemReq.alignment %
16733  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16734  }
16735  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16736  {
16737  VMA_ASSERT(vkMemReq.alignment %
16738  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16739  }
16740 
16741  // 3. Allocate memory using allocator.
16742  res = allocator->AllocateMemory(
16743  vkMemReq,
16744  requiresDedicatedAllocation,
16745  prefersDedicatedAllocation,
16746  *pBuffer, // dedicatedBuffer
16747  VK_NULL_HANDLE, // dedicatedImage
16748  *pAllocationCreateInfo,
16749  VMA_SUBALLOCATION_TYPE_BUFFER,
16750  1, // allocationCount
16751  pAllocation);
16752 
16753 #if VMA_RECORDING_ENABLED
16754  if(allocator->GetRecorder() != VMA_NULL)
16755  {
16756  allocator->GetRecorder()->RecordCreateBuffer(
16757  allocator->GetCurrentFrameIndex(),
16758  *pBufferCreateInfo,
16759  *pAllocationCreateInfo,
16760  *pAllocation);
16761  }
16762 #endif
16763 
16764  if(res >= 0)
16765  {
16766  // 4. Bind buffer with memory.
16767  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16768  {
16769  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16770  }
16771  if(res >= 0)
16772  {
16773  // All steps succeeded.
16774  #if VMA_STATS_STRING_ENABLED
16775  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16776  #endif
16777  if(pAllocationInfo != VMA_NULL)
16778  {
16779  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16780  }
16781 
16782  return VK_SUCCESS;
16783  }
16784  allocator->FreeMemory(
16785  1, // allocationCount
16786  pAllocation);
16787  *pAllocation = VK_NULL_HANDLE;
16788  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16789  *pBuffer = VK_NULL_HANDLE;
16790  return res;
16791  }
16792  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16793  *pBuffer = VK_NULL_HANDLE;
16794  return res;
16795  }
16796  return res;
16797 }
16798 
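// Usage sketch (illustration only, not part of the library source): a typical
// vmaCreateBuffer call that creates a persistently mapped staging buffer. The
// size, usage flags, and identifier names are arbitrary example values.
static VkResult ExampleCreateStagingBuffer(
    VmaAllocator hAllocator,
    VkBuffer* pOutBuffer,
    VmaAllocation* pOutAlloc,
    void** ppOutMappedData)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536; // must be non-zero, or the call fails early as above
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo,
        pOutBuffer, pOutAlloc, &allocInfo);
    if(res == VK_SUCCESS)
    {
        // Persistently mapped thanks to VMA_ALLOCATION_CREATE_MAPPED_BIT.
        *ppOutMappedData = allocInfo.pMappedData;
    }
    return res;
}
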
16799 void vmaDestroyBuffer(
16800  VmaAllocator allocator,
16801  VkBuffer buffer,
16802  VmaAllocation allocation)
16803 {
16804  VMA_ASSERT(allocator);
16805 
16806  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16807  {
16808  return;
16809  }
16810 
16811  VMA_DEBUG_LOG("vmaDestroyBuffer");
16812 
16813  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16814 
16815 #if VMA_RECORDING_ENABLED
16816  if(allocator->GetRecorder() != VMA_NULL)
16817  {
16818  allocator->GetRecorder()->RecordDestroyBuffer(
16819  allocator->GetCurrentFrameIndex(),
16820  allocation);
16821  }
16822 #endif
16823 
16824  if(buffer != VK_NULL_HANDLE)
16825  {
16826  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16827  }
16828 
16829  if(allocation != VK_NULL_HANDLE)
16830  {
16831  allocator->FreeMemory(
16832  1, // allocationCount
16833  &allocation);
16834  }
16835 }
16836 
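// Sketch of the equivalent manual teardown (assuming valid handles): roughly
// what vmaDestroyBuffer above performs for you, expressed through the public
// API (the allocator's custom CPU allocation callbacks are omitted here).
// hDevice is hypothetical - the same VkDevice the allocator was created with.
static void ExampleManualDestroyBuffer(
    VmaAllocator hAllocator,
    VkDevice hDevice,
    VkBuffer buf,
    VmaAllocation alloc)
{
    vkDestroyBuffer(hDevice, buf, VMA_NULL); // destroy the buffer handle first
    vmaFreeMemory(hAllocator, alloc);        // then release its memory back to VMA
}
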
16837 VkResult vmaCreateImage(
16838  VmaAllocator allocator,
16839  const VkImageCreateInfo* pImageCreateInfo,
16840  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16841  VkImage* pImage,
16842  VmaAllocation* pAllocation,
16843  VmaAllocationInfo* pAllocationInfo)
16844 {
16845  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16846 
16847  if(pImageCreateInfo->extent.width == 0 ||
16848  pImageCreateInfo->extent.height == 0 ||
16849  pImageCreateInfo->extent.depth == 0 ||
16850  pImageCreateInfo->mipLevels == 0 ||
16851  pImageCreateInfo->arrayLayers == 0)
16852  {
16853  return VK_ERROR_VALIDATION_FAILED_EXT;
16854  }
16855 
16856  VMA_DEBUG_LOG("vmaCreateImage");
16857 
16858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16859 
16860  *pImage = VK_NULL_HANDLE;
16861  *pAllocation = VK_NULL_HANDLE;
16862 
16863  // 1. Create VkImage.
16864  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16865  allocator->m_hDevice,
16866  pImageCreateInfo,
16867  allocator->GetAllocationCallbacks(),
16868  pImage);
16869  if(res >= 0)
16870  {
16871  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16872  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16873  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16874 
16875  // 2. vkGetImageMemoryRequirements.
16876  VkMemoryRequirements vkMemReq = {};
16877  bool requiresDedicatedAllocation = false;
16878  bool prefersDedicatedAllocation = false;
16879  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16880  requiresDedicatedAllocation, prefersDedicatedAllocation);
16881  // 3. Allocate memory using allocator.
16882  res = allocator->AllocateMemory(
16883  vkMemReq,
16884  requiresDedicatedAllocation,
16885  prefersDedicatedAllocation,
16886  VK_NULL_HANDLE, // dedicatedBuffer
16887  *pImage, // dedicatedImage
16888  *pAllocationCreateInfo,
16889  suballocType,
16890  1, // allocationCount
16891  pAllocation);
16892 
16893 #if VMA_RECORDING_ENABLED
16894  if(allocator->GetRecorder() != VMA_NULL)
16895  {
16896  allocator->GetRecorder()->RecordCreateImage(
16897  allocator->GetCurrentFrameIndex(),
16898  *pImageCreateInfo,
16899  *pAllocationCreateInfo,
16900  *pAllocation);
16901  }
16902 #endif
16903 
16904  if(res >= 0)
16905  {
16906  // 4. Bind image with memory.
16907  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16908  {
16909  res = allocator->BindImageMemory(*pAllocation, *pImage);
16910  }
16911  if(res >= 0)
16912  {
16913  // All steps succeeded.
16914  #if VMA_STATS_STRING_ENABLED
16915  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16916  #endif
16917  if(pAllocationInfo != VMA_NULL)
16918  {
16919  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16920  }
16921 
16922  return VK_SUCCESS;
16923  }
16924  allocator->FreeMemory(
16925  1, // allocationCount
16926  pAllocation);
16927  *pAllocation = VK_NULL_HANDLE;
16928  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16929  *pImage = VK_NULL_HANDLE;
16930  return res;
16931  }
16932  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16933  *pImage = VK_NULL_HANDLE;
16934  return res;
16935  }
16936  return res;
16937 }
16938 
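// Usage sketch (illustration only, not part of the library source): creating a
// GPU-only 2D texture with vmaCreateImage. Extent, format, and usage are
// arbitrary example values; note that every extent/mipLevels/arrayLayers field
// must be non-zero, as validated at the top of the function above.
static VkResult ExampleCreateTexture(
    VmaAllocator hAllocator,
    VkImage* pOutImage,
    VmaAllocation* pOutAlloc)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // selects the OPTIMAL suballocation type above
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(hAllocator, &imgCreateInfo, &allocCreateInfo,
        pOutImage, pOutAlloc, VMA_NULL);
}
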
16939 void vmaDestroyImage(
16940  VmaAllocator allocator,
16941  VkImage image,
16942  VmaAllocation allocation)
16943 {
16944  VMA_ASSERT(allocator);
16945 
16946  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16947  {
16948  return;
16949  }
16950 
16951  VMA_DEBUG_LOG("vmaDestroyImage");
16952 
16953  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16954 
16955 #if VMA_RECORDING_ENABLED
16956  if(allocator->GetRecorder() != VMA_NULL)
16957  {
16958  allocator->GetRecorder()->RecordDestroyImage(
16959  allocator->GetCurrentFrameIndex(),
16960  allocation);
16961  }
16962 #endif
16963 
16964  if(image != VK_NULL_HANDLE)
16965  {
16966  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16967  }
16968  if(allocation != VK_NULL_HANDLE)
16969  {
16970  allocator->FreeMemory(
16971  1, // allocationCount
16972  &allocation);
16973  }
16974 }
16975 
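// Usage sketch (illustration only): a full image lifecycle, pairing the
// hypothetical ExampleCreateTexture sketch above with vmaDestroyImage, which
// destroys the VkImage and frees its memory in one call and safely accepts
// null handles.
static void ExampleImageLifecycle(VmaAllocator hAllocator)
{
    VkImage image = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    if(ExampleCreateTexture(hAllocator, &image, &alloc) == VK_SUCCESS)
    {
        // ... record commands that use the image ...
        vmaDestroyImage(hAllocator, image, alloc);
    }
}
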
16976 #endif // #ifdef VMA_IMPLEMENTATION