Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1644 /*
1645 Define this macro to 0/1 to disable/enable support for recording functionality,
1646 available through VmaAllocatorCreateInfo::pRecordSettings.
1647 */
1648 #ifndef VMA_RECORDING_ENABLED
1649  #ifdef _WIN32
1650  #define VMA_RECORDING_ENABLED 1
1651  #else
1652  #define VMA_RECORDING_ENABLED 0
1653  #endif
1654 #endif
1655 
1656 #ifndef NOMINMAX
1657  #define NOMINMAX // For windows.h
1658 #endif
1659 
1660 #ifndef VULKAN_H_
1661  #include <vulkan/vulkan.h>
1662 #endif
1663 
1664 #if VMA_RECORDING_ENABLED
1665  #include <windows.h>
1666 #endif
1667 
1668 #if !defined(VMA_DEDICATED_ALLOCATION)
1669  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1670  #define VMA_DEDICATED_ALLOCATION 1
1671  #else
1672  #define VMA_DEDICATED_ALLOCATION 0
1673  #endif
1674 #endif
1675 
1685 VK_DEFINE_HANDLE(VmaAllocator)
1686 
1687 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1689  VmaAllocator allocator,
1690  uint32_t memoryType,
1691  VkDeviceMemory memory,
1692  VkDeviceSize size);
1694 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1695  VmaAllocator allocator,
1696  uint32_t memoryType,
1697  VkDeviceMemory memory,
1698  VkDeviceSize size);
1699 
1713 
1743 
1746 typedef VkFlags VmaAllocatorCreateFlags;
1747 
1752 typedef struct VmaVulkanFunctions {
1753  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1754  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1755  PFN_vkAllocateMemory vkAllocateMemory;
1756  PFN_vkFreeMemory vkFreeMemory;
1757  PFN_vkMapMemory vkMapMemory;
1758  PFN_vkUnmapMemory vkUnmapMemory;
1759  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1760  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1761  PFN_vkBindBufferMemory vkBindBufferMemory;
1762  PFN_vkBindImageMemory vkBindImageMemory;
1763  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1764  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1765  PFN_vkCreateBuffer vkCreateBuffer;
1766  PFN_vkDestroyBuffer vkDestroyBuffer;
1767  PFN_vkCreateImage vkCreateImage;
1768  PFN_vkDestroyImage vkDestroyImage;
1769  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1770 #if VMA_DEDICATED_ALLOCATION
1771  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1772  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1773 #endif
1775 
1777 typedef enum VmaRecordFlagBits {
1784 
1787 typedef VkFlags VmaRecordFlags;
1788 
1790 typedef struct VmaRecordSettings
1791 {
1801  const char* pFilePath;
1803 
1806 {
1810 
1811  VkPhysicalDevice physicalDevice;
1813 
1814  VkDevice device;
1816 
1819 
1820  const VkAllocationCallbacks* pAllocationCallbacks;
1822 
1862  const VkDeviceSize* pHeapSizeLimit;
1883 
1885 VkResult vmaCreateAllocator(
1886  const VmaAllocatorCreateInfo* pCreateInfo,
1887  VmaAllocator* pAllocator);
1888 
1890 void vmaDestroyAllocator(
1891  VmaAllocator allocator);
1892 
1898  VmaAllocator allocator,
1899  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1900 
1906  VmaAllocator allocator,
1907  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1908 
1916  VmaAllocator allocator,
1917  uint32_t memoryTypeIndex,
1918  VkMemoryPropertyFlags* pFlags);
1919 
1929  VmaAllocator allocator,
1930  uint32_t frameIndex);
1931 
1934 typedef struct VmaStatInfo
1935 {
1937  uint32_t blockCount;
1943  VkDeviceSize usedBytes;
1945  VkDeviceSize unusedBytes;
1948 } VmaStatInfo;
1949 
1951 typedef struct VmaStats
1952 {
1953  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1954  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1956 } VmaStats;
1957 
1959 void vmaCalculateStats(
1960  VmaAllocator allocator,
1961  VmaStats* pStats);
1962 
1963 #ifndef VMA_STATS_STRING_ENABLED
1964 #define VMA_STATS_STRING_ENABLED 1
1965 #endif
1966 
1967 #if VMA_STATS_STRING_ENABLED
1968 
1970 
1972 void vmaBuildStatsString(
1973  VmaAllocator allocator,
1974  char** ppStatsString,
1975  VkBool32 detailedMap);
1976 
1977 void vmaFreeStatsString(
1978  VmaAllocator allocator,
1979  char* pStatsString);
1980 
1981 #endif // #if VMA_STATS_STRING_ENABLED
1982 
1991 VK_DEFINE_HANDLE(VmaPool)
1992 
1993 typedef enum VmaMemoryUsage
1994 {
2043 } VmaMemoryUsage;
2044 
2054 
2115 
2131 
2141 
2148 
2152 
2154 {
2167  VkMemoryPropertyFlags requiredFlags;
2172  VkMemoryPropertyFlags preferredFlags;
2180  uint32_t memoryTypeBits;
2193  void* pUserData;
2195 
2212 VkResult vmaFindMemoryTypeIndex(
2213  VmaAllocator allocator,
2214  uint32_t memoryTypeBits,
2215  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2216  uint32_t* pMemoryTypeIndex);
2217 
2231  VmaAllocator allocator,
2232  const VkBufferCreateInfo* pBufferCreateInfo,
2233  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2234  uint32_t* pMemoryTypeIndex);
2235 
2249  VmaAllocator allocator,
2250  const VkImageCreateInfo* pImageCreateInfo,
2251  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2252  uint32_t* pMemoryTypeIndex);
2253 
2274 
2291 
2302 
2308 
2311 typedef VkFlags VmaPoolCreateFlags;
2312 
2315 typedef struct VmaPoolCreateInfo {
2330  VkDeviceSize blockSize;
2359 
2362 typedef struct VmaPoolStats {
2365  VkDeviceSize size;
2368  VkDeviceSize unusedSize;
2381  VkDeviceSize unusedRangeSizeMax;
2384  size_t blockCount;
2385 } VmaPoolStats;
2386 
2393 VkResult vmaCreatePool(
2394  VmaAllocator allocator,
2395  const VmaPoolCreateInfo* pCreateInfo,
2396  VmaPool* pPool);
2397 
2400 void vmaDestroyPool(
2401  VmaAllocator allocator,
2402  VmaPool pool);
2403 
2410 void vmaGetPoolStats(
2411  VmaAllocator allocator,
2412  VmaPool pool,
2413  VmaPoolStats* pPoolStats);
2414 
2422  VmaAllocator allocator,
2423  VmaPool pool,
2424  size_t* pLostAllocationCount);
2425 
2440 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2441 
2466 VK_DEFINE_HANDLE(VmaAllocation)
2467 
2468 
2470 typedef struct VmaAllocationInfo {
2475  uint32_t memoryType;
2484  VkDeviceMemory deviceMemory;
2489  VkDeviceSize offset;
2494  VkDeviceSize size;
2508  void* pUserData;
2510 
2521 VkResult vmaAllocateMemory(
2522  VmaAllocator allocator,
2523  const VkMemoryRequirements* pVkMemoryRequirements,
2524  const VmaAllocationCreateInfo* pCreateInfo,
2525  VmaAllocation* pAllocation,
2526  VmaAllocationInfo* pAllocationInfo);
2527 
2547 VkResult vmaAllocateMemoryPages(
2548  VmaAllocator allocator,
2549  const VkMemoryRequirements* pVkMemoryRequirements,
2550  const VmaAllocationCreateInfo* pCreateInfo,
2551  size_t allocationCount,
2552  VmaAllocation* pAllocations,
2553  VmaAllocationInfo* pAllocationInfo);
2554 
2562  VmaAllocator allocator,
2563  VkBuffer buffer,
2564  const VmaAllocationCreateInfo* pCreateInfo,
2565  VmaAllocation* pAllocation,
2566  VmaAllocationInfo* pAllocationInfo);
2567 
2569 VkResult vmaAllocateMemoryForImage(
2570  VmaAllocator allocator,
2571  VkImage image,
2572  const VmaAllocationCreateInfo* pCreateInfo,
2573  VmaAllocation* pAllocation,
2574  VmaAllocationInfo* pAllocationInfo);
2575 
2580 void vmaFreeMemory(
2581  VmaAllocator allocator,
2582  VmaAllocation allocation);
2583 
2594 void vmaFreeMemoryPages(
2595  VmaAllocator allocator,
2596  size_t allocationCount,
2597  VmaAllocation* pAllocations);
2598 
2619 VkResult vmaResizeAllocation(
2620  VmaAllocator allocator,
2621  VmaAllocation allocation,
2622  VkDeviceSize newSize);
2623 
2641  VmaAllocator allocator,
2642  VmaAllocation allocation,
2643  VmaAllocationInfo* pAllocationInfo);
2644 
2659 VkBool32 vmaTouchAllocation(
2660  VmaAllocator allocator,
2661  VmaAllocation allocation);
2662 
2677  VmaAllocator allocator,
2678  VmaAllocation allocation,
2679  void* pUserData);
2680 
2692  VmaAllocator allocator,
2693  VmaAllocation* pAllocation);
2694 
2729 VkResult vmaMapMemory(
2730  VmaAllocator allocator,
2731  VmaAllocation allocation,
2732  void** ppData);
2733 
2738 void vmaUnmapMemory(
2739  VmaAllocator allocator,
2740  VmaAllocation allocation);
2741 
2758 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2759 
2776 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2777 
2794 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2795 
2802 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2803 
2804 typedef enum VmaDefragmentationFlagBits {
2808 typedef VkFlags VmaDefragmentationFlags;
2809 
2814 typedef struct VmaDefragmentationInfo2 {
2838  uint32_t poolCount;
2859  VkDeviceSize maxCpuBytesToMove;
2869  VkDeviceSize maxGpuBytesToMove;
2883  VkCommandBuffer commandBuffer;
2885 
2890 typedef struct VmaDefragmentationInfo {
2895  VkDeviceSize maxBytesToMove;
2902 
2904 typedef struct VmaDefragmentationStats {
2906  VkDeviceSize bytesMoved;
2908  VkDeviceSize bytesFreed;
2914 
2941 VkResult vmaDefragmentationBegin(
2942  VmaAllocator allocator,
2943  const VmaDefragmentationInfo2* pInfo,
2944  VmaDefragmentationStats* pStats,
2945  VmaDefragmentationContext *pContext);
2946 
2952 VkResult vmaDefragmentationEnd(
2953  VmaAllocator allocator,
2954  VmaDefragmentationContext context);
2955 
2996 VkResult vmaDefragment(
2997  VmaAllocator allocator,
2998  VmaAllocation* pAllocations,
2999  size_t allocationCount,
3000  VkBool32* pAllocationsChanged,
3001  const VmaDefragmentationInfo *pDefragmentationInfo,
3002  VmaDefragmentationStats* pDefragmentationStats);
3003 
3016 VkResult vmaBindBufferMemory(
3017  VmaAllocator allocator,
3018  VmaAllocation allocation,
3019  VkBuffer buffer);
3020 
3033 VkResult vmaBindImageMemory(
3034  VmaAllocator allocator,
3035  VmaAllocation allocation,
3036  VkImage image);
3037 
3064 VkResult vmaCreateBuffer(
3065  VmaAllocator allocator,
3066  const VkBufferCreateInfo* pBufferCreateInfo,
3067  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3068  VkBuffer* pBuffer,
3069  VmaAllocation* pAllocation,
3070  VmaAllocationInfo* pAllocationInfo);
3071 
3083 void vmaDestroyBuffer(
3084  VmaAllocator allocator,
3085  VkBuffer buffer,
3086  VmaAllocation allocation);
3087 
3089 VkResult vmaCreateImage(
3090  VmaAllocator allocator,
3091  const VkImageCreateInfo* pImageCreateInfo,
3092  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3093  VkImage* pImage,
3094  VmaAllocation* pAllocation,
3095  VmaAllocationInfo* pAllocationInfo);
3096 
3108 void vmaDestroyImage(
3109  VmaAllocator allocator,
3110  VkImage image,
3111  VmaAllocation allocation);
3112 
3113 #ifdef __cplusplus
3114 }
3115 #endif
3116 
3117 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3118 
3119 // For Visual Studio IntelliSense.
3120 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3121 #define VMA_IMPLEMENTATION
3122 #endif
3123 
3124 #ifdef VMA_IMPLEMENTATION
3125 #undef VMA_IMPLEMENTATION
3126 
3127 #include <cstdint>
3128 #include <cstdlib>
3129 #include <cstring>
3130 
3131 /*******************************************************************************
3132 CONFIGURATION SECTION
3133 
3134 Define some of these macros before each #include of this header or change them
3135 here if you need other then default behavior depending on your environment.
3136 */
3137 
3138 /*
3139 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3140 internally, like:
3141 
3142  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3143 
3144 Define to 0 if you are going to provide you own pointers to Vulkan functions via
3145 VmaAllocatorCreateInfo::pVulkanFunctions.
3146 */
3147 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3148 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3149 #endif
3150 
3151 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3152 //#define VMA_USE_STL_CONTAINERS 1
3153 
3154 /* Set this macro to 1 to make the library including and using STL containers:
3155 std::pair, std::vector, std::list, std::unordered_map.
3156 
3157 Set it to 0 or undefined to make the library using its own implementation of
3158 the containers.
3159 */
3160 #if VMA_USE_STL_CONTAINERS
3161  #define VMA_USE_STL_VECTOR 1
3162  #define VMA_USE_STL_UNORDERED_MAP 1
3163  #define VMA_USE_STL_LIST 1
3164 #endif
3165 
3166 #ifndef VMA_USE_STL_SHARED_MUTEX
3167  // Compiler conforms to C++17.
3168  #if __cplusplus >= 201703L
3169  #define VMA_USE_STL_SHARED_MUTEX 1
3170  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3171  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3172  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3173  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3174  #define VMA_USE_STL_SHARED_MUTEX 1
3175  #else
3176  #define VMA_USE_STL_SHARED_MUTEX 0
3177  #endif
3178 #endif
3179 
3180 /*
3181 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3182 Library has its own container implementation.
3183 */
3184 #if VMA_USE_STL_VECTOR
3185  #include <vector>
3186 #endif
3187 
3188 #if VMA_USE_STL_UNORDERED_MAP
3189  #include <unordered_map>
3190 #endif
3191 
3192 #if VMA_USE_STL_LIST
3193  #include <list>
3194 #endif
3195 
3196 /*
3197 Following headers are used in this CONFIGURATION section only, so feel free to
3198 remove them if not needed.
3199 */
3200 #include <cassert> // for assert
3201 #include <algorithm> // for min, max
3202 #include <mutex>
3203 #include <atomic> // for std::atomic
3204 
3205 #ifndef VMA_NULL
3206  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3207  #define VMA_NULL nullptr
3208 #endif
3209 
3210 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3211 #include <cstdlib>
3212 void *aligned_alloc(size_t alignment, size_t size)
3213 {
3214  // alignment must be >= sizeof(void*)
3215  if(alignment < sizeof(void*))
3216  {
3217  alignment = sizeof(void*);
3218  }
3219 
3220  return memalign(alignment, size);
3221 }
3222 #elif defined(__APPLE__) || defined(__ANDROID__)
3223 #include <cstdlib>
3224 void *aligned_alloc(size_t alignment, size_t size)
3225 {
3226  // alignment must be >= sizeof(void*)
3227  if(alignment < sizeof(void*))
3228  {
3229  alignment = sizeof(void*);
3230  }
3231 
3232  void *pointer;
3233  if(posix_memalign(&pointer, alignment, size) == 0)
3234  return pointer;
3235  return VMA_NULL;
3236 }
3237 #endif
3238 
3239 // If your compiler is not compatible with C++11 and definition of
3240 // aligned_alloc() function is missing, uncommeting following line may help:
3241 
3242 //#include <malloc.h>
3243 
3244 // Normal assert to check for programmer's errors, especially in Debug configuration.
3245 #ifndef VMA_ASSERT
3246  #ifdef _DEBUG
3247  #define VMA_ASSERT(expr) assert(expr)
3248  #else
3249  #define VMA_ASSERT(expr)
3250  #endif
3251 #endif
3252 
3253 // Assert that will be called very often, like inside data structures e.g. operator[].
3254 // Making it non-empty can make program slow.
3255 #ifndef VMA_HEAVY_ASSERT
3256  #ifdef _DEBUG
3257  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3258  #else
3259  #define VMA_HEAVY_ASSERT(expr)
3260  #endif
3261 #endif
3262 
3263 #ifndef VMA_ALIGN_OF
3264  #define VMA_ALIGN_OF(type) (__alignof(type))
3265 #endif
3266 
3267 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3268  #if defined(_WIN32)
3269  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3270  #else
3271  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3272  #endif
3273 #endif
3274 
3275 #ifndef VMA_SYSTEM_FREE
3276  #if defined(_WIN32)
3277  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3278  #else
3279  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3280  #endif
3281 #endif
3282 
3283 #ifndef VMA_MIN
3284  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3285 #endif
3286 
3287 #ifndef VMA_MAX
3288  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3289 #endif
3290 
3291 #ifndef VMA_SWAP
3292  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3293 #endif
3294 
3295 #ifndef VMA_SORT
3296  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3297 #endif
3298 
3299 #ifndef VMA_DEBUG_LOG
3300  #define VMA_DEBUG_LOG(format, ...)
3301  /*
3302  #define VMA_DEBUG_LOG(format, ...) do { \
3303  printf(format, __VA_ARGS__); \
3304  printf("\n"); \
3305  } while(false)
3306  */
3307 #endif
3308 
3309 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3310 #if VMA_STATS_STRING_ENABLED
3311  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3312  {
3313  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3314  }
3315  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3316  {
3317  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3318  }
3319  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3320  {
3321  snprintf(outStr, strLen, "%p", ptr);
3322  }
3323 #endif
3324 
3325 #ifndef VMA_MUTEX
3326  class VmaMutex
3327  {
3328  public:
3329  void Lock() { m_Mutex.lock(); }
3330  void Unlock() { m_Mutex.unlock(); }
3331  private:
3332  std::mutex m_Mutex;
3333  };
3334  #define VMA_MUTEX VmaMutex
3335 #endif
3336 
3337 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3338 #ifndef VMA_RW_MUTEX
3339  #if VMA_USE_STL_SHARED_MUTEX
3340  // Use std::shared_mutex from C++17.
3341  #include <shared_mutex>
3342  class VmaRWMutex
3343  {
3344  public:
3345  void LockRead() { m_Mutex.lock_shared(); }
3346  void UnlockRead() { m_Mutex.unlock_shared(); }
3347  void LockWrite() { m_Mutex.lock(); }
3348  void UnlockWrite() { m_Mutex.unlock(); }
3349  private:
3350  std::shared_mutex m_Mutex;
3351  };
3352  #define VMA_RW_MUTEX VmaRWMutex
3353  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3354  // Use SRWLOCK from WinAPI.
3355  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3356  class VmaRWMutex
3357  {
3358  public:
3359  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3360  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3361  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3362  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3363  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3364  private:
3365  SRWLOCK m_Lock;
3366  };
3367  #define VMA_RW_MUTEX VmaRWMutex
3368  #else
3369  // Less efficient fallback: Use normal mutex.
3370  class VmaRWMutex
3371  {
3372  public:
3373  void LockRead() { m_Mutex.Lock(); }
3374  void UnlockRead() { m_Mutex.Unlock(); }
3375  void LockWrite() { m_Mutex.Lock(); }
3376  void UnlockWrite() { m_Mutex.Unlock(); }
3377  private:
3378  VMA_MUTEX m_Mutex;
3379  };
3380  #define VMA_RW_MUTEX VmaRWMutex
3381  #endif // #if VMA_USE_STL_SHARED_MUTEX
3382 #endif // #ifndef VMA_RW_MUTEX
3383 
3384 /*
3385 If providing your own implementation, you need to implement a subset of std::atomic:
3386 
3387 - Constructor(uint32_t desired)
3388 - uint32_t load() const
3389 - void store(uint32_t desired)
3390 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3391 */
3392 #ifndef VMA_ATOMIC_UINT32
3393  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3394 #endif
3395 
3396 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3397 
3401  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3402 #endif
3403 
3404 #ifndef VMA_DEBUG_ALIGNMENT
3405 
3409  #define VMA_DEBUG_ALIGNMENT (1)
3410 #endif
3411 
3412 #ifndef VMA_DEBUG_MARGIN
3413 
3417  #define VMA_DEBUG_MARGIN (0)
3418 #endif
3419 
3420 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3421 
3425  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3426 #endif
3427 
3428 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3429 
3434  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3435 #endif
3436 
3437 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3438 
3442  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3443 #endif
3444 
3445 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3446 
3450  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3451 #endif
3452 
3453 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3454  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3456 #endif
3457 
3458 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3459  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3461 #endif
3462 
3463 #ifndef VMA_CLASS_NO_COPY
3464  #define VMA_CLASS_NO_COPY(className) \
3465  private: \
3466  className(const className&) = delete; \
3467  className& operator=(const className&) = delete;
3468 #endif
3469 
3470 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3471 
3472 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3473 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3474 
3475 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3476 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3477 
3478 /*******************************************************************************
3479 END OF CONFIGURATION
3480 */
3481 
3482 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3483 
3484 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3485  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3486 
3487 // Returns number of bits set to 1 in (v).
3488 static inline uint32_t VmaCountBitsSet(uint32_t v)
3489 {
3490  uint32_t c = v - ((v >> 1) & 0x55555555);
3491  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3492  c = ((c >> 4) + c) & 0x0F0F0F0F;
3493  c = ((c >> 8) + c) & 0x00FF00FF;
3494  c = ((c >> 16) + c) & 0x0000FFFF;
3495  return c;
3496 }
3497 
3498 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
3499 // Use types like uint32_t, uint64_t as T.
3500 template <typename T>
3501 static inline T VmaAlignUp(T val, T align)
3502 {
3503  return (val + align - 1) / align * align;
3504 }
3505 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
3506 // Use types like uint32_t, uint64_t as T.
3507 template <typename T>
3508 static inline T VmaAlignDown(T val, T align)
3509 {
3510  return val / align * align;
3511 }
3512 
3513 // Division with mathematical rounding to nearest number.
3514 template <typename T>
3515 static inline T VmaRoundDiv(T x, T y)
3516 {
3517  return (x + (y / (T)2)) / y;
3518 }
3519 
3520 /*
3521 Returns true if given number is a power of two.
3522 T must be unsigned integer number or signed integer but always nonnegative.
3523 For 0 returns true.
3524 */
3525 template <typename T>
3526 inline bool VmaIsPow2(T x)
3527 {
3528  return (x & (x-1)) == 0;
3529 }
3530 
3531 // Returns smallest power of 2 greater or equal to v.
3532 static inline uint32_t VmaNextPow2(uint32_t v)
3533 {
3534  v--;
3535  v |= v >> 1;
3536  v |= v >> 2;
3537  v |= v >> 4;
3538  v |= v >> 8;
3539  v |= v >> 16;
3540  v++;
3541  return v;
3542 }
3543 static inline uint64_t VmaNextPow2(uint64_t v)
3544 {
3545  v--;
3546  v |= v >> 1;
3547  v |= v >> 2;
3548  v |= v >> 4;
3549  v |= v >> 8;
3550  v |= v >> 16;
3551  v |= v >> 32;
3552  v++;
3553  return v;
3554 }
3555 
3556 // Returns largest power of 2 less or equal to v.
3557 static inline uint32_t VmaPrevPow2(uint32_t v)
3558 {
3559  v |= v >> 1;
3560  v |= v >> 2;
3561  v |= v >> 4;
3562  v |= v >> 8;
3563  v |= v >> 16;
3564  v = v ^ (v >> 1);
3565  return v;
3566 }
3567 static inline uint64_t VmaPrevPow2(uint64_t v)
3568 {
3569  v |= v >> 1;
3570  v |= v >> 2;
3571  v |= v >> 4;
3572  v |= v >> 8;
3573  v |= v >> 16;
3574  v |= v >> 32;
3575  v = v ^ (v >> 1);
3576  return v;
3577 }
3578 
3579 static inline bool VmaStrIsEmpty(const char* pStr)
3580 {
3581  return pStr == VMA_NULL || *pStr == '\0';
3582 }
3583 
3584 #if VMA_STATS_STRING_ENABLED
3585 
3586 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3587 {
3588  switch(algorithm)
3589  {
3591  return "Linear";
3593  return "Buddy";
3594  case 0:
3595  return "Default";
3596  default:
3597  VMA_ASSERT(0);
3598  return "";
3599  }
3600 }
3601 
3602 #endif // #if VMA_STATS_STRING_ENABLED
3603 
3604 #ifndef VMA_SORT
3605 
3606 template<typename Iterator, typename Compare>
3607 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3608 {
3609  Iterator centerValue = end; --centerValue;
3610  Iterator insertIndex = beg;
3611  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3612  {
3613  if(cmp(*memTypeIndex, *centerValue))
3614  {
3615  if(insertIndex != memTypeIndex)
3616  {
3617  VMA_SWAP(*memTypeIndex, *insertIndex);
3618  }
3619  ++insertIndex;
3620  }
3621  }
3622  if(insertIndex != centerValue)
3623  {
3624  VMA_SWAP(*insertIndex, *centerValue);
3625  }
3626  return insertIndex;
3627 }
3628 
3629 template<typename Iterator, typename Compare>
3630 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3631 {
3632  if(beg < end)
3633  {
3634  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3635  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3636  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3637  }
3638 }
3639 
3640 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3641 
3642 #endif // #ifndef VMA_SORT
3643 
3644 /*
3645 Returns true if two memory blocks occupy overlapping pages.
3646 ResourceA must be in less memory offset than ResourceB.
3647 
3648 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3649 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3650 */
3651 static inline bool VmaBlocksOnSamePage(
3652  VkDeviceSize resourceAOffset,
3653  VkDeviceSize resourceASize,
3654  VkDeviceSize resourceBOffset,
3655  VkDeviceSize pageSize)
3656 {
3657  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3658  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3659  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3660  VkDeviceSize resourceBStart = resourceBOffset;
3661  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3662  return resourceAEndPage == resourceBStartPage;
3663 }
3664 
3665 enum VmaSuballocationType
3666 {
3667  VMA_SUBALLOCATION_TYPE_FREE = 0,
3668  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3669  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3670  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3671  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3672  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3673  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3674 };
3675 
3676 /*
3677 Returns true if given suballocation types could conflict and must respect
3678 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3679 or linear image and another one is optimal image. If type is unknown, behave
3680 conservatively.
3681 */
3682 static inline bool VmaIsBufferImageGranularityConflict(
3683  VmaSuballocationType suballocType1,
3684  VmaSuballocationType suballocType2)
3685 {
3686  if(suballocType1 > suballocType2)
3687  {
3688  VMA_SWAP(suballocType1, suballocType2);
3689  }
3690 
3691  switch(suballocType1)
3692  {
3693  case VMA_SUBALLOCATION_TYPE_FREE:
3694  return false;
3695  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3696  return true;
3697  case VMA_SUBALLOCATION_TYPE_BUFFER:
3698  return
3699  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3700  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3701  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3702  return
3703  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3704  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3705  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3706  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3707  return
3708  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3709  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3710  return false;
3711  default:
3712  VMA_ASSERT(0);
3713  return true;
3714  }
3715 }
3716 
3717 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3718 {
3719  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3720  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3721  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3722  {
3723  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3724  }
3725 }
3726 
3727 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3728 {
3729  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3730  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3731  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3732  {
3733  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3734  {
3735  return false;
3736  }
3737  }
3738  return true;
3739 }
3740 
3741 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3742 struct VmaMutexLock
3743 {
3744  VMA_CLASS_NO_COPY(VmaMutexLock)
3745 public:
3746  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3747  m_pMutex(useMutex ? &mutex : VMA_NULL)
3748  { if(m_pMutex) { m_pMutex->Lock(); } }
3749  ~VmaMutexLock()
3750  { if(m_pMutex) { m_pMutex->Unlock(); } }
3751 private:
3752  VMA_MUTEX* m_pMutex;
3753 };
3754 
3755 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3756 struct VmaMutexLockRead
3757 {
3758  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3759 public:
3760  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3761  m_pMutex(useMutex ? &mutex : VMA_NULL)
3762  { if(m_pMutex) { m_pMutex->LockRead(); } }
3763  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3764 private:
3765  VMA_RW_MUTEX* m_pMutex;
3766 };
3767 
3768 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3769 struct VmaMutexLockWrite
3770 {
3771  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3772 public:
3773  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3774  m_pMutex(useMutex ? &mutex : VMA_NULL)
3775  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3776  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3777 private:
3778  VMA_RW_MUTEX* m_pMutex;
3779 };
3780 
3781 #if VMA_DEBUG_GLOBAL_MUTEX
3782  static VMA_MUTEX gDebugGlobalMutex;
3783  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3784 #else
3785  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3786 #endif
3787 
3788 // Minimum size of a free suballocation to register it in the free suballocation collection.
3789 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3790 
3791 /*
3792 Performs binary search and returns iterator to first element that is greater or
3793 equal to (key), according to comparison (cmp).
3794 
3795 Cmp should return true if first argument is less than second argument.
3796 
3797 Returned value is the found element, if present in the collection or place where
3798 new element with value (key) should be inserted.
3799 */
3800 template <typename CmpLess, typename IterT, typename KeyT>
3801 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3802 {
3803  size_t down = 0, up = (end - beg);
3804  while(down < up)
3805  {
3806  const size_t mid = (down + up) / 2;
3807  if(cmp(*(beg+mid), key))
3808  {
3809  down = mid + 1;
3810  }
3811  else
3812  {
3813  up = mid;
3814  }
3815  }
3816  return beg + down;
3817 }
3818 
3819 /*
3820 Returns true if all pointers in the array are not-null and unique.
3821 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3822 T must be pointer type, e.g. VmaAllocation, VmaPool.
3823 */
3824 template<typename T>
3825 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3826 {
3827  for(uint32_t i = 0; i < count; ++i)
3828  {
3829  const T iPtr = arr[i];
3830  if(iPtr == VMA_NULL)
3831  {
3832  return false;
3833  }
3834  for(uint32_t j = i + 1; j < count; ++j)
3835  {
3836  if(iPtr == arr[j])
3837  {
3838  return false;
3839  }
3840  }
3841  }
3842  return true;
3843 }
3844 
3846 // Memory allocation
3847 
3848 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3849 {
3850  if((pAllocationCallbacks != VMA_NULL) &&
3851  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3852  {
3853  return (*pAllocationCallbacks->pfnAllocation)(
3854  pAllocationCallbacks->pUserData,
3855  size,
3856  alignment,
3857  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3858  }
3859  else
3860  {
3861  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3862  }
3863 }
3864 
3865 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3866 {
3867  if((pAllocationCallbacks != VMA_NULL) &&
3868  (pAllocationCallbacks->pfnFree != VMA_NULL))
3869  {
3870  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3871  }
3872  else
3873  {
3874  VMA_SYSTEM_FREE(ptr);
3875  }
3876 }
3877 
3878 template<typename T>
3879 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3880 {
3881  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3882 }
3883 
3884 template<typename T>
3885 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3886 {
3887  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3888 }
3889 
3890 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3891 
3892 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3893 
3894 template<typename T>
3895 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3896 {
3897  ptr->~T();
3898  VmaFree(pAllocationCallbacks, ptr);
3899 }
3900 
3901 template<typename T>
3902 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3903 {
3904  if(ptr != VMA_NULL)
3905  {
3906  for(size_t i = count; i--; )
3907  {
3908  ptr[i].~T();
3909  }
3910  VmaFree(pAllocationCallbacks, ptr);
3911  }
3912 }
3913 
3914 // STL-compatible allocator.
3915 template<typename T>
3916 class VmaStlAllocator
3917 {
3918 public:
3919  const VkAllocationCallbacks* const m_pCallbacks;
3920  typedef T value_type;
3921 
3922  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3923  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3924 
3925  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3926  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3927 
3928  template<typename U>
3929  bool operator==(const VmaStlAllocator<U>& rhs) const
3930  {
3931  return m_pCallbacks == rhs.m_pCallbacks;
3932  }
3933  template<typename U>
3934  bool operator!=(const VmaStlAllocator<U>& rhs) const
3935  {
3936  return m_pCallbacks != rhs.m_pCallbacks;
3937  }
3938 
3939  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3940 };
3941 
3942 #if VMA_USE_STL_VECTOR
3943 
3944 #define VmaVector std::vector
3945 
3946 template<typename T, typename allocatorT>
3947 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3948 {
3949  vec.insert(vec.begin() + index, item);
3950 }
3951 
3952 template<typename T, typename allocatorT>
3953 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3954 {
3955  vec.erase(vec.begin() + index);
3956 }
3957 
3958 #else // #if VMA_USE_STL_VECTOR
3959 
3960 /* Class with interface compatible with subset of std::vector.
3961 T must be POD because constructors and destructors are not called and memcpy is
3962 used for these objects. */
3963 template<typename T, typename AllocatorT>
3964 class VmaVector
3965 {
3966 public:
3967  typedef T value_type;
3968 
3969  VmaVector(const AllocatorT& allocator) :
3970  m_Allocator(allocator),
3971  m_pArray(VMA_NULL),
3972  m_Count(0),
3973  m_Capacity(0)
3974  {
3975  }
3976 
3977  VmaVector(size_t count, const AllocatorT& allocator) :
3978  m_Allocator(allocator),
3979  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3980  m_Count(count),
3981  m_Capacity(count)
3982  {
3983  }
3984 
3985  VmaVector(const VmaVector<T, AllocatorT>& src) :
3986  m_Allocator(src.m_Allocator),
3987  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3988  m_Count(src.m_Count),
3989  m_Capacity(src.m_Count)
3990  {
3991  if(m_Count != 0)
3992  {
3993  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3994  }
3995  }
3996 
3997  ~VmaVector()
3998  {
3999  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4000  }
4001 
4002  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4003  {
4004  if(&rhs != this)
4005  {
4006  resize(rhs.m_Count);
4007  if(m_Count != 0)
4008  {
4009  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4010  }
4011  }
4012  return *this;
4013  }
4014 
4015  bool empty() const { return m_Count == 0; }
4016  size_t size() const { return m_Count; }
4017  T* data() { return m_pArray; }
4018  const T* data() const { return m_pArray; }
4019 
4020  T& operator[](size_t index)
4021  {
4022  VMA_HEAVY_ASSERT(index < m_Count);
4023  return m_pArray[index];
4024  }
4025  const T& operator[](size_t index) const
4026  {
4027  VMA_HEAVY_ASSERT(index < m_Count);
4028  return m_pArray[index];
4029  }
4030 
4031  T& front()
4032  {
4033  VMA_HEAVY_ASSERT(m_Count > 0);
4034  return m_pArray[0];
4035  }
4036  const T& front() const
4037  {
4038  VMA_HEAVY_ASSERT(m_Count > 0);
4039  return m_pArray[0];
4040  }
4041  T& back()
4042  {
4043  VMA_HEAVY_ASSERT(m_Count > 0);
4044  return m_pArray[m_Count - 1];
4045  }
4046  const T& back() const
4047  {
4048  VMA_HEAVY_ASSERT(m_Count > 0);
4049  return m_pArray[m_Count - 1];
4050  }
4051 
4052  void reserve(size_t newCapacity, bool freeMemory = false)
4053  {
4054  newCapacity = VMA_MAX(newCapacity, m_Count);
4055 
4056  if((newCapacity < m_Capacity) && !freeMemory)
4057  {
4058  newCapacity = m_Capacity;
4059  }
4060 
4061  if(newCapacity != m_Capacity)
4062  {
4063  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4064  if(m_Count != 0)
4065  {
4066  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4067  }
4068  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4069  m_Capacity = newCapacity;
4070  m_pArray = newArray;
4071  }
4072  }
4073 
4074  void resize(size_t newCount, bool freeMemory = false)
4075  {
4076  size_t newCapacity = m_Capacity;
4077  if(newCount > m_Capacity)
4078  {
4079  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4080  }
4081  else if(freeMemory)
4082  {
4083  newCapacity = newCount;
4084  }
4085 
4086  if(newCapacity != m_Capacity)
4087  {
4088  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4089  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4090  if(elementsToCopy != 0)
4091  {
4092  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4093  }
4094  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4095  m_Capacity = newCapacity;
4096  m_pArray = newArray;
4097  }
4098 
4099  m_Count = newCount;
4100  }
4101 
4102  void clear(bool freeMemory = false)
4103  {
4104  resize(0, freeMemory);
4105  }
4106 
4107  void insert(size_t index, const T& src)
4108  {
4109  VMA_HEAVY_ASSERT(index <= m_Count);
4110  const size_t oldCount = size();
4111  resize(oldCount + 1);
4112  if(index < oldCount)
4113  {
4114  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4115  }
4116  m_pArray[index] = src;
4117  }
4118 
4119  void remove(size_t index)
4120  {
4121  VMA_HEAVY_ASSERT(index < m_Count);
4122  const size_t oldCount = size();
4123  if(index < oldCount - 1)
4124  {
4125  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4126  }
4127  resize(oldCount - 1);
4128  }
4129 
4130  void push_back(const T& src)
4131  {
4132  const size_t newIndex = size();
4133  resize(newIndex + 1);
4134  m_pArray[newIndex] = src;
4135  }
4136 
4137  void pop_back()
4138  {
4139  VMA_HEAVY_ASSERT(m_Count > 0);
4140  resize(size() - 1);
4141  }
4142 
4143  void push_front(const T& src)
4144  {
4145  insert(0, src);
4146  }
4147 
4148  void pop_front()
4149  {
4150  VMA_HEAVY_ASSERT(m_Count > 0);
4151  remove(0);
4152  }
4153 
4154  typedef T* iterator;
4155 
4156  iterator begin() { return m_pArray; }
4157  iterator end() { return m_pArray + m_Count; }
4158 
4159 private:
4160  AllocatorT m_Allocator;
4161  T* m_pArray;
4162  size_t m_Count;
4163  size_t m_Capacity;
4164 };
4165 
4166 template<typename T, typename allocatorT>
4167 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4168 {
4169  vec.insert(index, item);
4170 }
4171 
4172 template<typename T, typename allocatorT>
4173 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4174 {
4175  vec.remove(index);
4176 }
4177 
4178 #endif // #if VMA_USE_STL_VECTOR
4179 
4180 template<typename CmpLess, typename VectorT>
4181 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4182 {
4183  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4184  vector.data(),
4185  vector.data() + vector.size(),
4186  value,
4187  CmpLess()) - vector.data();
4188  VmaVectorInsert(vector, indexToInsert, value);
4189  return indexToInsert;
4190 }
4191 
4192 template<typename CmpLess, typename VectorT>
4193 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4194 {
4195  CmpLess comparator;
4196  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4197  vector.begin(),
4198  vector.end(),
4199  value,
4200  comparator);
4201  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4202  {
4203  size_t indexToRemove = it - vector.begin();
4204  VmaVectorRemove(vector, indexToRemove);
4205  return true;
4206  }
4207  return false;
4208 }
4209 
4210 template<typename CmpLess, typename IterT, typename KeyT>
4211 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4212 {
4213  CmpLess comparator;
4214  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4215  beg, end, value, comparator);
4216  if(it == end ||
4217  (!comparator(*it, value) && !comparator(value, *it)))
4218  {
4219  return it;
4220  }
4221  return end;
4222 }
4223 
4225 // class VmaPoolAllocator
4226 
4227 /*
4228 Allocator for objects of type T using a list of arrays (pools) to speed up
4229 allocation. Number of elements that can be allocated is not bounded because
4230 allocator can create multiple blocks.
4231 */
4232 template<typename T>
4233 class VmaPoolAllocator
4234 {
4235  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4236 public:
4237  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4238  ~VmaPoolAllocator();
4239  void Clear();
4240  T* Alloc();
4241  void Free(T* ptr);
4242 
4243 private:
4244  union Item
4245  {
4246  uint32_t NextFreeIndex;
4247  T Value;
4248  };
4249 
4250  struct ItemBlock
4251  {
4252  Item* pItems;
4253  uint32_t Capacity;
4254  uint32_t FirstFreeIndex;
4255  };
4256 
4257  const VkAllocationCallbacks* m_pAllocationCallbacks;
4258  const uint32_t m_FirstBlockCapacity;
4259  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4260 
4261  ItemBlock& CreateNewBlock();
4262 };
4263 
4264 template<typename T>
4265 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4266  m_pAllocationCallbacks(pAllocationCallbacks),
4267  m_FirstBlockCapacity(firstBlockCapacity),
4268  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4269 {
4270  VMA_ASSERT(m_FirstBlockCapacity > 1);
4271 }
4272 
4273 template<typename T>
4274 VmaPoolAllocator<T>::~VmaPoolAllocator()
4275 {
4276  Clear();
4277 }
4278 
4279 template<typename T>
4280 void VmaPoolAllocator<T>::Clear()
4281 {
4282  for(size_t i = m_ItemBlocks.size(); i--; )
4283  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4284  m_ItemBlocks.clear();
4285 }
4286 
4287 template<typename T>
4288 T* VmaPoolAllocator<T>::Alloc()
4289 {
4290  for(size_t i = m_ItemBlocks.size(); i--; )
4291  {
4292  ItemBlock& block = m_ItemBlocks[i];
4293  // This block has some free items: Use first one.
4294  if(block.FirstFreeIndex != UINT32_MAX)
4295  {
4296  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4297  block.FirstFreeIndex = pItem->NextFreeIndex;
4298  return &pItem->Value;
4299  }
4300  }
4301 
4302  // No block has free item: Create new one and use it.
4303  ItemBlock& newBlock = CreateNewBlock();
4304  Item* const pItem = &newBlock.pItems[0];
4305  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4306  return &pItem->Value;
4307 }
4308 
4309 template<typename T>
4310 void VmaPoolAllocator<T>::Free(T* ptr)
4311 {
4312  // Search all memory blocks to find ptr.
4313  for(size_t i = m_ItemBlocks.size(); i--; )
4314  {
4315  ItemBlock& block = m_ItemBlocks[i];
4316 
4317  // Casting to union.
4318  Item* pItemPtr;
4319  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4320 
4321  // Check if pItemPtr is in address range of this block.
4322  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4323  {
4324  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4325  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4326  block.FirstFreeIndex = index;
4327  return;
4328  }
4329  }
4330  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4331 }
4332 
4333 template<typename T>
4334 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4335 {
4336  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4337  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4338 
4339  const ItemBlock newBlock = {
4340  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4341  newBlockCapacity,
4342  0 };
4343 
4344  m_ItemBlocks.push_back(newBlock);
4345 
4346  // Setup singly-linked list of all free items in this block.
4347  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4348  newBlock.pItems[i].NextFreeIndex = i + 1;
4349  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4350  return m_ItemBlocks.back();
4351 }
4352 
4354 // class VmaRawList, VmaList
4355 
4356 #if VMA_USE_STL_LIST
4357 
4358 #define VmaList std::list
4359 
4360 #else // #if VMA_USE_STL_LIST
4361 
4362 template<typename T>
4363 struct VmaListItem
4364 {
4365  VmaListItem* pPrev;
4366  VmaListItem* pNext;
4367  T Value;
4368 };
4369 
4370 // Doubly linked list.
4371 template<typename T>
4372 class VmaRawList
4373 {
4374  VMA_CLASS_NO_COPY(VmaRawList)
4375 public:
4376  typedef VmaListItem<T> ItemType;
4377 
4378  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4379  ~VmaRawList();
4380  void Clear();
4381 
4382  size_t GetCount() const { return m_Count; }
4383  bool IsEmpty() const { return m_Count == 0; }
4384 
4385  ItemType* Front() { return m_pFront; }
4386  const ItemType* Front() const { return m_pFront; }
4387  ItemType* Back() { return m_pBack; }
4388  const ItemType* Back() const { return m_pBack; }
4389 
4390  ItemType* PushBack();
4391  ItemType* PushFront();
4392  ItemType* PushBack(const T& value);
4393  ItemType* PushFront(const T& value);
4394  void PopBack();
4395  void PopFront();
4396 
4397  // Item can be null - it means PushBack.
4398  ItemType* InsertBefore(ItemType* pItem);
4399  // Item can be null - it means PushFront.
4400  ItemType* InsertAfter(ItemType* pItem);
4401 
4402  ItemType* InsertBefore(ItemType* pItem, const T& value);
4403  ItemType* InsertAfter(ItemType* pItem, const T& value);
4404 
4405  void Remove(ItemType* pItem);
4406 
4407 private:
4408  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4409  VmaPoolAllocator<ItemType> m_ItemAllocator;
4410  ItemType* m_pFront;
4411  ItemType* m_pBack;
4412  size_t m_Count;
4413 };
4414 
4415 template<typename T>
4416 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4417  m_pAllocationCallbacks(pAllocationCallbacks),
4418  m_ItemAllocator(pAllocationCallbacks, 128),
4419  m_pFront(VMA_NULL),
4420  m_pBack(VMA_NULL),
4421  m_Count(0)
4422 {
4423 }
4424 
4425 template<typename T>
4426 VmaRawList<T>::~VmaRawList()
4427 {
4428  // Intentionally not calling Clear, because that would be unnecessary
4429  // computations to return all items to m_ItemAllocator as free.
4430 }
4431 
4432 template<typename T>
4433 void VmaRawList<T>::Clear()
4434 {
4435  if(IsEmpty() == false)
4436  {
4437  ItemType* pItem = m_pBack;
4438  while(pItem != VMA_NULL)
4439  {
4440  ItemType* const pPrevItem = pItem->pPrev;
4441  m_ItemAllocator.Free(pItem);
4442  pItem = pPrevItem;
4443  }
4444  m_pFront = VMA_NULL;
4445  m_pBack = VMA_NULL;
4446  m_Count = 0;
4447  }
4448 }
4449 
4450 template<typename T>
4451 VmaListItem<T>* VmaRawList<T>::PushBack()
4452 {
4453  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4454  pNewItem->pNext = VMA_NULL;
4455  if(IsEmpty())
4456  {
4457  pNewItem->pPrev = VMA_NULL;
4458  m_pFront = pNewItem;
4459  m_pBack = pNewItem;
4460  m_Count = 1;
4461  }
4462  else
4463  {
4464  pNewItem->pPrev = m_pBack;
4465  m_pBack->pNext = pNewItem;
4466  m_pBack = pNewItem;
4467  ++m_Count;
4468  }
4469  return pNewItem;
4470 }
4471 
4472 template<typename T>
4473 VmaListItem<T>* VmaRawList<T>::PushFront()
4474 {
4475  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4476  pNewItem->pPrev = VMA_NULL;
4477  if(IsEmpty())
4478  {
4479  pNewItem->pNext = VMA_NULL;
4480  m_pFront = pNewItem;
4481  m_pBack = pNewItem;
4482  m_Count = 1;
4483  }
4484  else
4485  {
4486  pNewItem->pNext = m_pFront;
4487  m_pFront->pPrev = pNewItem;
4488  m_pFront = pNewItem;
4489  ++m_Count;
4490  }
4491  return pNewItem;
4492 }
4493 
4494 template<typename T>
4495 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4496 {
4497  ItemType* const pNewItem = PushBack();
4498  pNewItem->Value = value;
4499  return pNewItem;
4500 }
4501 
4502 template<typename T>
4503 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4504 {
4505  ItemType* const pNewItem = PushFront();
4506  pNewItem->Value = value;
4507  return pNewItem;
4508 }
4509 
4510 template<typename T>
4511 void VmaRawList<T>::PopBack()
4512 {
4513  VMA_HEAVY_ASSERT(m_Count > 0);
4514  ItemType* const pBackItem = m_pBack;
4515  ItemType* const pPrevItem = pBackItem->pPrev;
4516  if(pPrevItem != VMA_NULL)
4517  {
4518  pPrevItem->pNext = VMA_NULL;
4519  }
4520  m_pBack = pPrevItem;
4521  m_ItemAllocator.Free(pBackItem);
4522  --m_Count;
4523 }
4524 
4525 template<typename T>
4526 void VmaRawList<T>::PopFront()
4527 {
4528  VMA_HEAVY_ASSERT(m_Count > 0);
4529  ItemType* const pFrontItem = m_pFront;
4530  ItemType* const pNextItem = pFrontItem->pNext;
4531  if(pNextItem != VMA_NULL)
4532  {
4533  pNextItem->pPrev = VMA_NULL;
4534  }
4535  m_pFront = pNextItem;
4536  m_ItemAllocator.Free(pFrontItem);
4537  --m_Count;
4538 }
4539 
4540 template<typename T>
4541 void VmaRawList<T>::Remove(ItemType* pItem)
4542 {
4543  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4544  VMA_HEAVY_ASSERT(m_Count > 0);
4545 
4546  if(pItem->pPrev != VMA_NULL)
4547  {
4548  pItem->pPrev->pNext = pItem->pNext;
4549  }
4550  else
4551  {
4552  VMA_HEAVY_ASSERT(m_pFront == pItem);
4553  m_pFront = pItem->pNext;
4554  }
4555 
4556  if(pItem->pNext != VMA_NULL)
4557  {
4558  pItem->pNext->pPrev = pItem->pPrev;
4559  }
4560  else
4561  {
4562  VMA_HEAVY_ASSERT(m_pBack == pItem);
4563  m_pBack = pItem->pPrev;
4564  }
4565 
4566  m_ItemAllocator.Free(pItem);
4567  --m_Count;
4568 }
4569 
4570 template<typename T>
4571 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4572 {
4573  if(pItem != VMA_NULL)
4574  {
4575  ItemType* const prevItem = pItem->pPrev;
4576  ItemType* const newItem = m_ItemAllocator.Alloc();
4577  newItem->pPrev = prevItem;
4578  newItem->pNext = pItem;
4579  pItem->pPrev = newItem;
4580  if(prevItem != VMA_NULL)
4581  {
4582  prevItem->pNext = newItem;
4583  }
4584  else
4585  {
4586  VMA_HEAVY_ASSERT(m_pFront == pItem);
4587  m_pFront = newItem;
4588  }
4589  ++m_Count;
4590  return newItem;
4591  }
4592  else
4593  return PushBack();
4594 }
4595 
4596 template<typename T>
4597 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4598 {
4599  if(pItem != VMA_NULL)
4600  {
4601  ItemType* const nextItem = pItem->pNext;
4602  ItemType* const newItem = m_ItemAllocator.Alloc();
4603  newItem->pNext = nextItem;
4604  newItem->pPrev = pItem;
4605  pItem->pNext = newItem;
4606  if(nextItem != VMA_NULL)
4607  {
4608  nextItem->pPrev = newItem;
4609  }
4610  else
4611  {
4612  VMA_HEAVY_ASSERT(m_pBack == pItem);
4613  m_pBack = newItem;
4614  }
4615  ++m_Count;
4616  return newItem;
4617  }
4618  else
4619  return PushFront();
4620 }
4621 
4622 template<typename T>
4623 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4624 {
4625  ItemType* const newItem = InsertBefore(pItem);
4626  newItem->Value = value;
4627  return newItem;
4628 }
4629 
4630 template<typename T>
4631 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4632 {
4633  ItemType* const newItem = InsertAfter(pItem);
4634  newItem->Value = value;
4635  return newItem;
4636 }
4637 
4638 template<typename T, typename AllocatorT>
4639 class VmaList
4640 {
4641  VMA_CLASS_NO_COPY(VmaList)
4642 public:
4643  class iterator
4644  {
4645  public:
4646  iterator() :
4647  m_pList(VMA_NULL),
4648  m_pItem(VMA_NULL)
4649  {
4650  }
4651 
4652  T& operator*() const
4653  {
4654  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4655  return m_pItem->Value;
4656  }
4657  T* operator->() const
4658  {
4659  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4660  return &m_pItem->Value;
4661  }
4662 
4663  iterator& operator++()
4664  {
4665  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4666  m_pItem = m_pItem->pNext;
4667  return *this;
4668  }
4669  iterator& operator--()
4670  {
4671  if(m_pItem != VMA_NULL)
4672  {
4673  m_pItem = m_pItem->pPrev;
4674  }
4675  else
4676  {
4677  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4678  m_pItem = m_pList->Back();
4679  }
4680  return *this;
4681  }
4682 
4683  iterator operator++(int)
4684  {
4685  iterator result = *this;
4686  ++*this;
4687  return result;
4688  }
4689  iterator operator--(int)
4690  {
4691  iterator result = *this;
4692  --*this;
4693  return result;
4694  }
4695 
4696  bool operator==(const iterator& rhs) const
4697  {
4698  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4699  return m_pItem == rhs.m_pItem;
4700  }
4701  bool operator!=(const iterator& rhs) const
4702  {
4703  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4704  return m_pItem != rhs.m_pItem;
4705  }
4706 
4707  private:
4708  VmaRawList<T>* m_pList;
4709  VmaListItem<T>* m_pItem;
4710 
4711  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4712  m_pList(pList),
4713  m_pItem(pItem)
4714  {
4715  }
4716 
4717  friend class VmaList<T, AllocatorT>;
4718  };
4719 
4720  class const_iterator
4721  {
4722  public:
4723  const_iterator() :
4724  m_pList(VMA_NULL),
4725  m_pItem(VMA_NULL)
4726  {
4727  }
4728 
4729  const_iterator(const iterator& src) :
4730  m_pList(src.m_pList),
4731  m_pItem(src.m_pItem)
4732  {
4733  }
4734 
4735  const T& operator*() const
4736  {
4737  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4738  return m_pItem->Value;
4739  }
4740  const T* operator->() const
4741  {
4742  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4743  return &m_pItem->Value;
4744  }
4745 
4746  const_iterator& operator++()
4747  {
4748  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4749  m_pItem = m_pItem->pNext;
4750  return *this;
4751  }
4752  const_iterator& operator--()
4753  {
4754  if(m_pItem != VMA_NULL)
4755  {
4756  m_pItem = m_pItem->pPrev;
4757  }
4758  else
4759  {
4760  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4761  m_pItem = m_pList->Back();
4762  }
4763  return *this;
4764  }
4765 
4766  const_iterator operator++(int)
4767  {
4768  const_iterator result = *this;
4769  ++*this;
4770  return result;
4771  }
4772  const_iterator operator--(int)
4773  {
4774  const_iterator result = *this;
4775  --*this;
4776  return result;
4777  }
4778 
4779  bool operator==(const const_iterator& rhs) const
4780  {
4781  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4782  return m_pItem == rhs.m_pItem;
4783  }
4784  bool operator!=(const const_iterator& rhs) const
4785  {
4786  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4787  return m_pItem != rhs.m_pItem;
4788  }
4789 
4790  private:
4791  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4792  m_pList(pList),
4793  m_pItem(pItem)
4794  {
4795  }
4796 
4797  const VmaRawList<T>* m_pList;
4798  const VmaListItem<T>* m_pItem;
4799 
4800  friend class VmaList<T, AllocatorT>;
4801  };
4802 
4803  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4804 
4805  bool empty() const { return m_RawList.IsEmpty(); }
4806  size_t size() const { return m_RawList.GetCount(); }
4807 
4808  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4809  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4810 
4811  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4812  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4813 
4814  void clear() { m_RawList.Clear(); }
4815  void push_back(const T& value) { m_RawList.PushBack(value); }
4816  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4817  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4818 
4819 private:
4820  VmaRawList<T> m_RawList;
4821 };
4822 
4823 #endif // #if VMA_USE_STL_LIST
4824 
4826 // class VmaMap
4827 
4828 // Unused in this version.
4829 #if 0
4830 
4831 #if VMA_USE_STL_UNORDERED_MAP
4832 
4833 #define VmaPair std::pair
4834 
4835 #define VMA_MAP_TYPE(KeyT, ValueT) \
4836  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4837 
4838 #else // #if VMA_USE_STL_UNORDERED_MAP
4839 
4840 template<typename T1, typename T2>
4841 struct VmaPair
4842 {
4843  T1 first;
4844  T2 second;
4845 
4846  VmaPair() : first(), second() { }
4847  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4848 };
4849 
4850 /* Class compatible with subset of interface of std::unordered_map.
4851 KeyT, ValueT must be POD because they will be stored in VmaVector.
4852 */
4853 template<typename KeyT, typename ValueT>
4854 class VmaMap
4855 {
4856 public:
4857  typedef VmaPair<KeyT, ValueT> PairType;
4858  typedef PairType* iterator;
4859 
4860  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4861 
4862  iterator begin() { return m_Vector.begin(); }
4863  iterator end() { return m_Vector.end(); }
4864 
4865  void insert(const PairType& pair);
4866  iterator find(const KeyT& key);
4867  void erase(iterator it);
4868 
4869 private:
4870  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4871 };
4872 
4873 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4874 
4875 template<typename FirstT, typename SecondT>
4876 struct VmaPairFirstLess
4877 {
4878  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4879  {
4880  return lhs.first < rhs.first;
4881  }
4882  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4883  {
4884  return lhs.first < rhsFirst;
4885  }
4886 };
4887 
4888 template<typename KeyT, typename ValueT>
4889 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4890 {
4891  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4892  m_Vector.data(),
4893  m_Vector.data() + m_Vector.size(),
4894  pair,
4895  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4896  VmaVectorInsert(m_Vector, indexToInsert, pair);
4897 }
4898 
4899 template<typename KeyT, typename ValueT>
4900 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4901 {
4902  PairType* it = VmaBinaryFindFirstNotLess(
4903  m_Vector.data(),
4904  m_Vector.data() + m_Vector.size(),
4905  key,
4906  VmaPairFirstLess<KeyT, ValueT>());
4907  if((it != m_Vector.end()) && (it->first == key))
4908  {
4909  return it;
4910  }
4911  else
4912  {
4913  return m_Vector.end();
4914  }
4915 }
4916 
4917 template<typename KeyT, typename ValueT>
4918 void VmaMap<KeyT, ValueT>::erase(iterator it)
4919 {
4920  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4921 }
4922 
4923 #endif // #if VMA_USE_STL_UNORDERED_MAP
4924 
4925 #endif // #if 0
4926 
4928 
4929 class VmaDeviceMemoryBlock;
4930 
4931 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4932 
4933 struct VmaAllocation_T
4934 {
4935 private:
4936  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4937 
4938  enum FLAGS
4939  {
4940  FLAG_USER_DATA_STRING = 0x01,
4941  };
4942 
4943 public:
4944  enum ALLOCATION_TYPE
4945  {
4946  ALLOCATION_TYPE_NONE,
4947  ALLOCATION_TYPE_BLOCK,
4948  ALLOCATION_TYPE_DEDICATED,
4949  };
4950 
4951  /*
4952  This struct cannot have constructor or destructor. It must be POD because it is
4953  allocated using VmaPoolAllocator.
4954  */
4955 
4956  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4957  {
4958  m_Alignment = 1;
4959  m_Size = 0;
4960  m_pUserData = VMA_NULL;
4961  m_LastUseFrameIndex = currentFrameIndex;
4962  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4963  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4964  m_MapCount = 0;
4965  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4966 
4967 #if VMA_STATS_STRING_ENABLED
4968  m_CreationFrameIndex = currentFrameIndex;
4969  m_BufferImageUsage = 0;
4970 #endif
4971  }
4972 
4973  void Dtor()
4974  {
4975  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4976 
4977  // Check if owned string was freed.
4978  VMA_ASSERT(m_pUserData == VMA_NULL);
4979  }
4980 
4981  void InitBlockAllocation(
4982  VmaDeviceMemoryBlock* block,
4983  VkDeviceSize offset,
4984  VkDeviceSize alignment,
4985  VkDeviceSize size,
4986  VmaSuballocationType suballocationType,
4987  bool mapped,
4988  bool canBecomeLost)
4989  {
4990  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4991  VMA_ASSERT(block != VMA_NULL);
4992  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4993  m_Alignment = alignment;
4994  m_Size = size;
4995  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4996  m_SuballocationType = (uint8_t)suballocationType;
4997  m_BlockAllocation.m_Block = block;
4998  m_BlockAllocation.m_Offset = offset;
4999  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5000  }
5001 
5002  void InitLost()
5003  {
5004  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5005  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5006  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5007  m_BlockAllocation.m_Block = VMA_NULL;
5008  m_BlockAllocation.m_Offset = 0;
5009  m_BlockAllocation.m_CanBecomeLost = true;
5010  }
5011 
5012  void ChangeBlockAllocation(
5013  VmaAllocator hAllocator,
5014  VmaDeviceMemoryBlock* block,
5015  VkDeviceSize offset);
5016 
5017  void ChangeSize(VkDeviceSize newSize);
5018  void ChangeOffset(VkDeviceSize newOffset);
5019 
5020  // pMappedData not null means allocation is created with MAPPED flag.
5021  void InitDedicatedAllocation(
5022  uint32_t memoryTypeIndex,
5023  VkDeviceMemory hMemory,
5024  VmaSuballocationType suballocationType,
5025  void* pMappedData,
5026  VkDeviceSize size)
5027  {
5028  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5029  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5030  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5031  m_Alignment = 0;
5032  m_Size = size;
5033  m_SuballocationType = (uint8_t)suballocationType;
5034  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5035  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5036  m_DedicatedAllocation.m_hMemory = hMemory;
5037  m_DedicatedAllocation.m_pMappedData = pMappedData;
5038  }
5039 
5040  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5041  VkDeviceSize GetAlignment() const { return m_Alignment; }
5042  VkDeviceSize GetSize() const { return m_Size; }
5043  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5044  void* GetUserData() const { return m_pUserData; }
5045  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5046  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5047 
5048  VmaDeviceMemoryBlock* GetBlock() const
5049  {
5050  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5051  return m_BlockAllocation.m_Block;
5052  }
5053  VkDeviceSize GetOffset() const;
5054  VkDeviceMemory GetMemory() const;
5055  uint32_t GetMemoryTypeIndex() const;
5056  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5057  void* GetMappedData() const;
5058  bool CanBecomeLost() const;
5059 
5060  uint32_t GetLastUseFrameIndex() const
5061  {
5062  return m_LastUseFrameIndex.load();
5063  }
5064  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5065  {
5066  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5067  }
5068  /*
5069  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5070  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5071  - Else, returns false.
5072 
5073  If hAllocation is already lost, assert - you should not call it then.
5074  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5075  */
5076  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5077 
5078  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5079  {
5080  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5081  outInfo.blockCount = 1;
5082  outInfo.allocationCount = 1;
5083  outInfo.unusedRangeCount = 0;
5084  outInfo.usedBytes = m_Size;
5085  outInfo.unusedBytes = 0;
5086  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5087  outInfo.unusedRangeSizeMin = UINT64_MAX;
5088  outInfo.unusedRangeSizeMax = 0;
5089  }
5090 
5091  void BlockAllocMap();
5092  void BlockAllocUnmap();
5093  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5094  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5095 
5096 #if VMA_STATS_STRING_ENABLED
5097  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5098  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5099 
5100  void InitBufferImageUsage(uint32_t bufferImageUsage)
5101  {
5102  VMA_ASSERT(m_BufferImageUsage == 0);
5103  m_BufferImageUsage = bufferImageUsage;
5104  }
5105 
5106  void PrintParameters(class VmaJsonWriter& json) const;
5107 #endif
5108 
5109 private:
5110  VkDeviceSize m_Alignment;
5111  VkDeviceSize m_Size;
5112  void* m_pUserData;
5113  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5114  uint8_t m_Type; // ALLOCATION_TYPE
5115  uint8_t m_SuballocationType; // VmaSuballocationType
5116  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5117  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5118  uint8_t m_MapCount;
5119  uint8_t m_Flags; // enum FLAGS
5120 
5121  // Allocation out of VmaDeviceMemoryBlock.
5122  struct BlockAllocation
5123  {
5124  VmaDeviceMemoryBlock* m_Block;
5125  VkDeviceSize m_Offset;
5126  bool m_CanBecomeLost;
5127  };
5128 
5129  // Allocation for an object that has its own private VkDeviceMemory.
5130  struct DedicatedAllocation
5131  {
5132  uint32_t m_MemoryTypeIndex;
5133  VkDeviceMemory m_hMemory;
5134  void* m_pMappedData; // Not null means memory is mapped.
5135  };
5136 
5137  union
5138  {
5139  // Allocation out of VmaDeviceMemoryBlock.
5140  BlockAllocation m_BlockAllocation;
5141  // Allocation for an object that has its own private VkDeviceMemory.
5142  DedicatedAllocation m_DedicatedAllocation;
5143  };
5144 
5145 #if VMA_STATS_STRING_ENABLED
5146  uint32_t m_CreationFrameIndex;
5147  uint32_t m_BufferImageUsage; // 0 if unknown.
5148 #endif
5149 
5150  void FreeUserDataString(VmaAllocator hAllocator);
5151 };
5152 
5153 /*
5154 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5155 allocated memory block or free.
5156 */
5157 struct VmaSuballocation
5158 {
5159  VkDeviceSize offset;
5160  VkDeviceSize size;
5161  VmaAllocation hAllocation;
5162  VmaSuballocationType type;
5163 };
5164 
5165 // Comparator for offsets.
5166 struct VmaSuballocationOffsetLess
5167 {
5168  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5169  {
5170  return lhs.offset < rhs.offset;
5171  }
5172 };
5173 struct VmaSuballocationOffsetGreater
5174 {
5175  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5176  {
5177  return lhs.offset > rhs.offset;
5178  }
5179 };
5180 
5181 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5182 
5183 // Cost of one additional allocation lost, as equivalent in bytes.
5184 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5185 
5186 enum class VmaAllocationRequestType
5187 {
5188  Normal,
5189  // Used by "Linear" algorithm.
5190  UpperAddress,
5191  EndOf1st,
5192  EndOf2nd,
5193 };
5194 
5195 /*
5196 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5197 
5198 If canMakeOtherLost was false:
5199 - item points to a FREE suballocation.
5200 - itemsToMakeLostCount is 0.
5201 
5202 If canMakeOtherLost was true:
5203 - item points to first of sequence of suballocations, which are either FREE,
5204  or point to VmaAllocations that can become lost.
5205 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5206  the requested allocation to succeed.
5207 */
5208 struct VmaAllocationRequest
5209 {
5210  VkDeviceSize offset;
5211  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5212  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5213  VmaSuballocationList::iterator item;
5214  size_t itemsToMakeLostCount;
5215  void* customData;
5216  VmaAllocationRequestType type;
5217 
5218  VkDeviceSize CalcCost() const
5219  {
5220  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5221  }
5222 };
5223 
5224 /*
5225 Data structure used for bookkeeping of allocations and unused ranges of memory
5226 in a single VkDeviceMemory block.
5227 */
5228 class VmaBlockMetadata
5229 {
5230 public:
5231  VmaBlockMetadata(VmaAllocator hAllocator);
5232  virtual ~VmaBlockMetadata() { }
5233  virtual void Init(VkDeviceSize size) { m_Size = size; }
5234 
5235  // Validates all data structures inside this object. If not valid, returns false.
5236  virtual bool Validate() const = 0;
5237  VkDeviceSize GetSize() const { return m_Size; }
5238  virtual size_t GetAllocationCount() const = 0;
5239  virtual VkDeviceSize GetSumFreeSize() const = 0;
5240  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5241  // Returns true if this block is empty - contains only single free suballocation.
5242  virtual bool IsEmpty() const = 0;
5243 
5244  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5245  // Shouldn't modify blockCount.
5246  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5247 
5248 #if VMA_STATS_STRING_ENABLED
5249  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5250 #endif
5251 
5252  // Tries to find a place for suballocation with given parameters inside this block.
5253  // If succeeded, fills pAllocationRequest and returns true.
5254  // If failed, returns false.
5255  virtual bool CreateAllocationRequest(
5256  uint32_t currentFrameIndex,
5257  uint32_t frameInUseCount,
5258  VkDeviceSize bufferImageGranularity,
5259  VkDeviceSize allocSize,
5260  VkDeviceSize allocAlignment,
5261  bool upperAddress,
5262  VmaSuballocationType allocType,
5263  bool canMakeOtherLost,
5264  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5265  uint32_t strategy,
5266  VmaAllocationRequest* pAllocationRequest) = 0;
5267 
5268  virtual bool MakeRequestedAllocationsLost(
5269  uint32_t currentFrameIndex,
5270  uint32_t frameInUseCount,
5271  VmaAllocationRequest* pAllocationRequest) = 0;
5272 
5273  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5274 
5275  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5276 
5277  // Makes actual allocation based on request. Request must already be checked and valid.
5278  virtual void Alloc(
5279  const VmaAllocationRequest& request,
5280  VmaSuballocationType type,
5281  VkDeviceSize allocSize,
5282  VmaAllocation hAllocation) = 0;
5283 
5284  // Frees suballocation assigned to given memory region.
5285  virtual void Free(const VmaAllocation allocation) = 0;
5286  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5287 
5288  // Tries to resize (grow or shrink) space for given allocation, in place.
5289  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5290 
5291 protected:
5292  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5293 
5294 #if VMA_STATS_STRING_ENABLED
5295  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5296  VkDeviceSize unusedBytes,
5297  size_t allocationCount,
5298  size_t unusedRangeCount) const;
5299  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5300  VkDeviceSize offset,
5301  VmaAllocation hAllocation) const;
5302  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5303  VkDeviceSize offset,
5304  VkDeviceSize size) const;
5305  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5306 #endif
5307 
5308 private:
5309  VkDeviceSize m_Size;
5310  const VkAllocationCallbacks* m_pAllocationCallbacks;
5311 };
5312 
5313 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5314  VMA_ASSERT(0 && "Validation failed: " #cond); \
5315  return false; \
5316  } } while(false)
5317 
5318 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5319 {
5320  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5321 public:
5322  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5323  virtual ~VmaBlockMetadata_Generic();
5324  virtual void Init(VkDeviceSize size);
5325 
5326  virtual bool Validate() const;
5327  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5328  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5329  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5330  virtual bool IsEmpty() const;
5331 
5332  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5333  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5334 
5335 #if VMA_STATS_STRING_ENABLED
5336  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5337 #endif
5338 
5339  virtual bool CreateAllocationRequest(
5340  uint32_t currentFrameIndex,
5341  uint32_t frameInUseCount,
5342  VkDeviceSize bufferImageGranularity,
5343  VkDeviceSize allocSize,
5344  VkDeviceSize allocAlignment,
5345  bool upperAddress,
5346  VmaSuballocationType allocType,
5347  bool canMakeOtherLost,
5348  uint32_t strategy,
5349  VmaAllocationRequest* pAllocationRequest);
5350 
5351  virtual bool MakeRequestedAllocationsLost(
5352  uint32_t currentFrameIndex,
5353  uint32_t frameInUseCount,
5354  VmaAllocationRequest* pAllocationRequest);
5355 
5356  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5357 
5358  virtual VkResult CheckCorruption(const void* pBlockData);
5359 
5360  virtual void Alloc(
5361  const VmaAllocationRequest& request,
5362  VmaSuballocationType type,
5363  VkDeviceSize allocSize,
5364  VmaAllocation hAllocation);
5365 
5366  virtual void Free(const VmaAllocation allocation);
5367  virtual void FreeAtOffset(VkDeviceSize offset);
5368 
5369  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5370 
5372  // For defragmentation
5373 
5374  bool IsBufferImageGranularityConflictPossible(
5375  VkDeviceSize bufferImageGranularity,
5376  VmaSuballocationType& inOutPrevSuballocType) const;
5377 
5378 private:
5379  friend class VmaDefragmentationAlgorithm_Generic;
5380  friend class VmaDefragmentationAlgorithm_Fast;
5381 
5382  uint32_t m_FreeCount;
5383  VkDeviceSize m_SumFreeSize;
5384  VmaSuballocationList m_Suballocations;
5385  // Suballocations that are free and have size greater than certain threshold.
5386  // Sorted by size, ascending.
5387  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5388 
5389  bool ValidateFreeSuballocationList() const;
5390 
5391  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5392  // If yes, fills pOffset and returns true. If no, returns false.
5393  bool CheckAllocation(
5394  uint32_t currentFrameIndex,
5395  uint32_t frameInUseCount,
5396  VkDeviceSize bufferImageGranularity,
5397  VkDeviceSize allocSize,
5398  VkDeviceSize allocAlignment,
5399  VmaSuballocationType allocType,
5400  VmaSuballocationList::const_iterator suballocItem,
5401  bool canMakeOtherLost,
5402  VkDeviceSize* pOffset,
5403  size_t* itemsToMakeLostCount,
5404  VkDeviceSize* pSumFreeSize,
5405  VkDeviceSize* pSumItemSize) const;
5406  // Given free suballocation, it merges it with following one, which must also be free.
5407  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5408  // Releases given suballocation, making it free.
5409  // Merges it with adjacent free suballocations if applicable.
5410  // Returns iterator to new free suballocation at this place.
5411  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5412  // Given free suballocation, it inserts it into sorted list of
5413  // m_FreeSuballocationsBySize if it's suitable.
5414  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5415  // Given free suballocation, it removes it from sorted list of
5416  // m_FreeSuballocationsBySize if it's suitable.
5417  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5418 };
5419 
5420 /*
5421 Allocations and their references in internal data structure look like this:
5422 
5423 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5424 
5425  0 +-------+
5426  | |
5427  | |
5428  | |
5429  +-------+
5430  | Alloc | 1st[m_1stNullItemsBeginCount]
5431  +-------+
5432  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5433  +-------+
5434  | ... |
5435  +-------+
5436  | Alloc | 1st[1st.size() - 1]
5437  +-------+
5438  | |
5439  | |
5440  | |
5441 GetSize() +-------+
5442 
5443 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5444 
5445  0 +-------+
5446  | Alloc | 2nd[0]
5447  +-------+
5448  | Alloc | 2nd[1]
5449  +-------+
5450  | ... |
5451  +-------+
5452  | Alloc | 2nd[2nd.size() - 1]
5453  +-------+
5454  | |
5455  | |
5456  | |
5457  +-------+
5458  | Alloc | 1st[m_1stNullItemsBeginCount]
5459  +-------+
5460  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5461  +-------+
5462  | ... |
5463  +-------+
5464  | Alloc | 1st[1st.size() - 1]
5465  +-------+
5466  | |
5467 GetSize() +-------+
5468 
5469 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5470 
5471  0 +-------+
5472  | |
5473  | |
5474  | |
5475  +-------+
5476  | Alloc | 1st[m_1stNullItemsBeginCount]
5477  +-------+
5478  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5479  +-------+
5480  | ... |
5481  +-------+
5482  | Alloc | 1st[1st.size() - 1]
5483  +-------+
5484  | |
5485  | |
5486  | |
5487  +-------+
5488  | Alloc | 2nd[2nd.size() - 1]
5489  +-------+
5490  | ... |
5491  +-------+
5492  | Alloc | 2nd[1]
5493  +-------+
5494  | Alloc | 2nd[0]
5495 GetSize() +-------+
5496 
5497 */
5498 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5499 {
5500  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5501 public:
5502  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5503  virtual ~VmaBlockMetadata_Linear();
5504  virtual void Init(VkDeviceSize size);
5505 
5506  virtual bool Validate() const;
5507  virtual size_t GetAllocationCount() const;
5508  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5509  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5510  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5511 
5512  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5513  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5514 
5515 #if VMA_STATS_STRING_ENABLED
5516  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5517 #endif
5518 
5519  virtual bool CreateAllocationRequest(
5520  uint32_t currentFrameIndex,
5521  uint32_t frameInUseCount,
5522  VkDeviceSize bufferImageGranularity,
5523  VkDeviceSize allocSize,
5524  VkDeviceSize allocAlignment,
5525  bool upperAddress,
5526  VmaSuballocationType allocType,
5527  bool canMakeOtherLost,
5528  uint32_t strategy,
5529  VmaAllocationRequest* pAllocationRequest);
5530 
5531  virtual bool MakeRequestedAllocationsLost(
5532  uint32_t currentFrameIndex,
5533  uint32_t frameInUseCount,
5534  VmaAllocationRequest* pAllocationRequest);
5535 
5536  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5537 
5538  virtual VkResult CheckCorruption(const void* pBlockData);
5539 
5540  virtual void Alloc(
5541  const VmaAllocationRequest& request,
5542  VmaSuballocationType type,
5543  VkDeviceSize allocSize,
5544  VmaAllocation hAllocation);
5545 
5546  virtual void Free(const VmaAllocation allocation);
5547  virtual void FreeAtOffset(VkDeviceSize offset);
5548 
5549 private:
5550  /*
5551  There are two suballocation vectors, used in ping-pong way.
5552  The one with index m_1stVectorIndex is called 1st.
5553  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5554  2nd can be non-empty only when 1st is not empty.
5555  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5556  */
5557  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5558 
5559  enum SECOND_VECTOR_MODE
5560  {
5561  SECOND_VECTOR_EMPTY,
5562  /*
5563  Suballocations in 2nd vector are created later than the ones in 1st, but they
5564  all have smaller offset.
5565  */
5566  SECOND_VECTOR_RING_BUFFER,
5567  /*
5568  Suballocations in 2nd vector are upper side of double stack.
5569  They all have offsets higher than those in 1st vector.
5570  Top of this stack means smaller offsets, but higher indices in this vector.
5571  */
5572  SECOND_VECTOR_DOUBLE_STACK,
5573  };
5574 
5575  VkDeviceSize m_SumFreeSize;
5576  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5577  uint32_t m_1stVectorIndex;
5578  SECOND_VECTOR_MODE m_2ndVectorMode;
5579 
5580  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5581  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5582  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5583  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5584 
5585  // Number of items in 1st vector with hAllocation = null at the beginning.
5586  size_t m_1stNullItemsBeginCount;
5587  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5588  size_t m_1stNullItemsMiddleCount;
5589  // Number of items in 2nd vector with hAllocation = null.
5590  size_t m_2ndNullItemsCount;
5591 
5592  bool ShouldCompact1st() const;
5593  void CleanupAfterFree();
5594 
5595  bool CreateAllocationRequest_LowerAddress(
5596  uint32_t currentFrameIndex,
5597  uint32_t frameInUseCount,
5598  VkDeviceSize bufferImageGranularity,
5599  VkDeviceSize allocSize,
5600  VkDeviceSize allocAlignment,
5601  VmaSuballocationType allocType,
5602  bool canMakeOtherLost,
5603  uint32_t strategy,
5604  VmaAllocationRequest* pAllocationRequest);
5605  bool CreateAllocationRequest_UpperAddress(
5606  uint32_t currentFrameIndex,
5607  uint32_t frameInUseCount,
5608  VkDeviceSize bufferImageGranularity,
5609  VkDeviceSize allocSize,
5610  VkDeviceSize allocAlignment,
5611  VmaSuballocationType allocType,
5612  bool canMakeOtherLost,
5613  uint32_t strategy,
5614  VmaAllocationRequest* pAllocationRequest);
5615 };
5616 
5617 /*
5618 - GetSize() is the original size of allocated memory block.
5619 - m_UsableSize is this size aligned down to a power of two.
5620  All allocations and calculations happen relative to m_UsableSize.
5621 - GetUnusableSize() is the difference between them.
5622  It is repoted as separate, unused range, not available for allocations.
5623 
5624 Node at level 0 has size = m_UsableSize.
5625 Each next level contains nodes with size 2 times smaller than current level.
5626 m_LevelCount is the maximum number of levels to use in the current object.
5627 */
5628 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5629 {
5630  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5631 public:
5632  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5633  virtual ~VmaBlockMetadata_Buddy();
5634  virtual void Init(VkDeviceSize size);
5635 
5636  virtual bool Validate() const;
5637  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5638  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5639  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5640  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5641 
5642  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5643  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5644 
5645 #if VMA_STATS_STRING_ENABLED
5646  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5647 #endif
5648 
5649  virtual bool CreateAllocationRequest(
5650  uint32_t currentFrameIndex,
5651  uint32_t frameInUseCount,
5652  VkDeviceSize bufferImageGranularity,
5653  VkDeviceSize allocSize,
5654  VkDeviceSize allocAlignment,
5655  bool upperAddress,
5656  VmaSuballocationType allocType,
5657  bool canMakeOtherLost,
5658  uint32_t strategy,
5659  VmaAllocationRequest* pAllocationRequest);
5660 
5661  virtual bool MakeRequestedAllocationsLost(
5662  uint32_t currentFrameIndex,
5663  uint32_t frameInUseCount,
5664  VmaAllocationRequest* pAllocationRequest);
5665 
5666  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5667 
5668  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5669 
5670  virtual void Alloc(
5671  const VmaAllocationRequest& request,
5672  VmaSuballocationType type,
5673  VkDeviceSize allocSize,
5674  VmaAllocation hAllocation);
5675 
5676  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5677  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5678 
5679 private:
5680  static const VkDeviceSize MIN_NODE_SIZE = 32;
5681  static const size_t MAX_LEVELS = 30;
5682 
5683  struct ValidationContext
5684  {
5685  size_t calculatedAllocationCount;
5686  size_t calculatedFreeCount;
5687  VkDeviceSize calculatedSumFreeSize;
5688 
5689  ValidationContext() :
5690  calculatedAllocationCount(0),
5691  calculatedFreeCount(0),
5692  calculatedSumFreeSize(0) { }
5693  };
5694 
5695  struct Node
5696  {
5697  VkDeviceSize offset;
5698  enum TYPE
5699  {
5700  TYPE_FREE,
5701  TYPE_ALLOCATION,
5702  TYPE_SPLIT,
5703  TYPE_COUNT
5704  } type;
5705  Node* parent;
5706  Node* buddy;
5707 
5708  union
5709  {
5710  struct
5711  {
5712  Node* prev;
5713  Node* next;
5714  } free;
5715  struct
5716  {
5717  VmaAllocation alloc;
5718  } allocation;
5719  struct
5720  {
5721  Node* leftChild;
5722  } split;
5723  };
5724  };
5725 
5726  // Size of the memory block aligned down to a power of two.
5727  VkDeviceSize m_UsableSize;
5728  uint32_t m_LevelCount;
5729 
5730  Node* m_Root;
5731  struct {
5732  Node* front;
5733  Node* back;
5734  } m_FreeList[MAX_LEVELS];
5735  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5736  size_t m_AllocationCount;
5737  // Number of nodes in the tree with type == TYPE_FREE.
5738  size_t m_FreeCount;
5739  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5740  VkDeviceSize m_SumFreeSize;
5741 
5742  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5743  void DeleteNode(Node* node);
5744  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5745  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5746  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5747  // Alloc passed just for validation. Can be null.
5748  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5749  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5750  // Adds node to the front of FreeList at given level.
5751  // node->type must be FREE.
5752  // node->free.prev, next can be undefined.
5753  void AddToFreeListFront(uint32_t level, Node* node);
5754  // Removes node from FreeList at given level.
5755  // node->type must be FREE.
5756  // node->free.prev, next stay untouched.
5757  void RemoveFromFreeList(uint32_t level, Node* node);
5758 
5759 #if VMA_STATS_STRING_ENABLED
5760  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5761 #endif
5762 };
5763 
5764 /*
5765 Represents a single block of device memory (`VkDeviceMemory`) with all the
5766 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5767 
5768 Thread-safety: This class must be externally synchronized.
5769 */
5770 class VmaDeviceMemoryBlock
5771 {
5772  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5773 public:
5774  VmaBlockMetadata* m_pMetadata;
5775 
5776  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5777 
5778  ~VmaDeviceMemoryBlock()
5779  {
5780  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5781  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5782  }
5783 
5784  // Always call after construction.
5785  void Init(
5786  VmaAllocator hAllocator,
5787  VmaPool hParentPool,
5788  uint32_t newMemoryTypeIndex,
5789  VkDeviceMemory newMemory,
5790  VkDeviceSize newSize,
5791  uint32_t id,
5792  uint32_t algorithm);
5793  // Always call before destruction.
5794  void Destroy(VmaAllocator allocator);
5795 
5796  VmaPool GetParentPool() const { return m_hParentPool; }
5797  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5798  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5799  uint32_t GetId() const { return m_Id; }
5800  void* GetMappedData() const { return m_pMappedData; }
5801 
5802  // Validates all data structures inside this object. If not valid, returns false.
5803  bool Validate() const;
5804 
5805  VkResult CheckCorruption(VmaAllocator hAllocator);
5806 
5807  // ppData can be null.
5808  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5809  void Unmap(VmaAllocator hAllocator, uint32_t count);
5810 
5811  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5812  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5813 
5814  VkResult BindBufferMemory(
5815  const VmaAllocator hAllocator,
5816  const VmaAllocation hAllocation,
5817  VkBuffer hBuffer);
5818  VkResult BindImageMemory(
5819  const VmaAllocator hAllocator,
5820  const VmaAllocation hAllocation,
5821  VkImage hImage);
5822 
5823 private:
5824  VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
5825  uint32_t m_MemoryTypeIndex;
5826  uint32_t m_Id;
5827  VkDeviceMemory m_hMemory;
5828 
5829  /*
5830  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5831  Also protects m_MapCount, m_pMappedData.
5832  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5833  */
5834  VMA_MUTEX m_Mutex;
5835  uint32_t m_MapCount;
5836  void* m_pMappedData;
5837 };
5838 
5839 struct VmaPointerLess
5840 {
5841  bool operator()(const void* lhs, const void* rhs) const
5842  {
5843  return lhs < rhs;
5844  }
5845 };
5846 
5847 struct VmaDefragmentationMove
5848 {
5849  size_t srcBlockIndex;
5850  size_t dstBlockIndex;
5851  VkDeviceSize srcOffset;
5852  VkDeviceSize dstOffset;
5853  VkDeviceSize size;
5854 };
5855 
5856 class VmaDefragmentationAlgorithm;
5857 
5858 /*
5859 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5860 Vulkan memory type.
5861 
5862 Synchronized internally with a mutex.
5863 */
5864 struct VmaBlockVector
5865 {
5866  VMA_CLASS_NO_COPY(VmaBlockVector)
5867 public:
5868  VmaBlockVector(
5869  VmaAllocator hAllocator,
5870  VmaPool hParentPool,
5871  uint32_t memoryTypeIndex,
5872  VkDeviceSize preferredBlockSize,
5873  size_t minBlockCount,
5874  size_t maxBlockCount,
5875  VkDeviceSize bufferImageGranularity,
5876  uint32_t frameInUseCount,
5877  bool isCustomPool,
5878  bool explicitBlockSize,
5879  uint32_t algorithm);
5880  ~VmaBlockVector();
5881 
5882  VkResult CreateMinBlocks();
5883 
5884  VmaPool GetParentPool() const { return m_hParentPool; }
5885  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5886  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5887  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5888  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5889  uint32_t GetAlgorithm() const { return m_Algorithm; }
5890 
5891  void GetPoolStats(VmaPoolStats* pStats);
5892 
5893  bool IsEmpty() const { return m_Blocks.empty(); }
5894  bool IsCorruptionDetectionEnabled() const;
5895 
5896  VkResult Allocate(
5897  uint32_t currentFrameIndex,
5898  VkDeviceSize size,
5899  VkDeviceSize alignment,
5900  const VmaAllocationCreateInfo& createInfo,
5901  VmaSuballocationType suballocType,
5902  size_t allocationCount,
5903  VmaAllocation* pAllocations);
5904 
5905  void Free(
5906  VmaAllocation hAllocation);
5907 
5908  // Adds statistics of this BlockVector to pStats.
5909  void AddStats(VmaStats* pStats);
5910 
5911 #if VMA_STATS_STRING_ENABLED
5912  void PrintDetailedMap(class VmaJsonWriter& json);
5913 #endif
5914 
5915  void MakePoolAllocationsLost(
5916  uint32_t currentFrameIndex,
5917  size_t* pLostAllocationCount);
5918  VkResult CheckCorruption();
5919 
5920  // Saves results in pCtx->res.
5921  void Defragment(
5922  class VmaBlockVectorDefragmentationContext* pCtx,
5923  VmaDefragmentationStats* pStats,
5924  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5925  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5926  VkCommandBuffer commandBuffer);
5927  void DefragmentationEnd(
5928  class VmaBlockVectorDefragmentationContext* pCtx,
5929  VmaDefragmentationStats* pStats);
5930 
5932  // To be used only while the m_Mutex is locked. Used during defragmentation.
5933 
5934  size_t GetBlockCount() const { return m_Blocks.size(); }
5935  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5936  size_t CalcAllocationCount() const;
5937  bool IsBufferImageGranularityConflictPossible() const;
5938 
5939 private:
5940  friend class VmaDefragmentationAlgorithm_Generic;
5941 
5942  const VmaAllocator m_hAllocator;
5943  const VmaPool m_hParentPool;
5944  const uint32_t m_MemoryTypeIndex;
5945  const VkDeviceSize m_PreferredBlockSize;
5946  const size_t m_MinBlockCount;
5947  const size_t m_MaxBlockCount;
5948  const VkDeviceSize m_BufferImageGranularity;
5949  const uint32_t m_FrameInUseCount;
5950  const bool m_IsCustomPool;
5951  const bool m_ExplicitBlockSize;
5952  const uint32_t m_Algorithm;
5953  /* There can be at most one allocation that is completely empty - a
5954  hysteresis to avoid pessimistic case of alternating creation and destruction
5955  of a VkDeviceMemory. */
5956  bool m_HasEmptyBlock;
5957  VMA_RW_MUTEX m_Mutex;
5958  // Incrementally sorted by sumFreeSize, ascending.
5959  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5960  uint32_t m_NextBlockId;
5961 
5962  VkDeviceSize CalcMaxBlockSize() const;
5963 
5964  // Finds and removes given block from vector.
5965  void Remove(VmaDeviceMemoryBlock* pBlock);
5966 
5967  // Performs single step in sorting m_Blocks. They may not be fully sorted
5968  // after this call.
5969  void IncrementallySortBlocks();
5970 
5971  VkResult AllocatePage(
5972  uint32_t currentFrameIndex,
5973  VkDeviceSize size,
5974  VkDeviceSize alignment,
5975  const VmaAllocationCreateInfo& createInfo,
5976  VmaSuballocationType suballocType,
5977  VmaAllocation* pAllocation);
5978 
5979  // To be used only without CAN_MAKE_OTHER_LOST flag.
5980  VkResult AllocateFromBlock(
5981  VmaDeviceMemoryBlock* pBlock,
5982  uint32_t currentFrameIndex,
5983  VkDeviceSize size,
5984  VkDeviceSize alignment,
5985  VmaAllocationCreateFlags allocFlags,
5986  void* pUserData,
5987  VmaSuballocationType suballocType,
5988  uint32_t strategy,
5989  VmaAllocation* pAllocation);
5990 
5991  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5992 
5993  // Saves result to pCtx->res.
5994  void ApplyDefragmentationMovesCpu(
5995  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5996  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5997  // Saves result to pCtx->res.
5998  void ApplyDefragmentationMovesGpu(
5999  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6000  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6001  VkCommandBuffer commandBuffer);
6002 
6003  /*
6004  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6005  - updated with new data.
6006  */
6007  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6008 };
6009 
6010 struct VmaPool_T
6011 {
6012  VMA_CLASS_NO_COPY(VmaPool_T)
6013 public:
6014  VmaBlockVector m_BlockVector;
6015 
6016  VmaPool_T(
6017  VmaAllocator hAllocator,
6018  const VmaPoolCreateInfo& createInfo,
6019  VkDeviceSize preferredBlockSize);
6020  ~VmaPool_T();
6021 
6022  uint32_t GetId() const { return m_Id; }
6023  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6024 
6025 #if VMA_STATS_STRING_ENABLED
6026  //void PrintDetailedMap(class VmaStringBuilder& sb);
6027 #endif
6028 
6029 private:
6030  uint32_t m_Id;
6031 };
6032 
6033 /*
6034 Performs defragmentation:
6035 
6036 - Updates `pBlockVector->m_pMetadata`.
6037 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6038 - Does not move actual data, only returns requested moves as `moves`.
6039 */
6040 class VmaDefragmentationAlgorithm
6041 {
6042  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6043 public:
6044  VmaDefragmentationAlgorithm(
6045  VmaAllocator hAllocator,
6046  VmaBlockVector* pBlockVector,
6047  uint32_t currentFrameIndex) :
6048  m_hAllocator(hAllocator),
6049  m_pBlockVector(pBlockVector),
6050  m_CurrentFrameIndex(currentFrameIndex)
6051  {
6052  }
6053  virtual ~VmaDefragmentationAlgorithm()
6054  {
6055  }
6056 
6057  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6058  virtual void AddAll() = 0;
6059 
6060  virtual VkResult Defragment(
6061  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6062  VkDeviceSize maxBytesToMove,
6063  uint32_t maxAllocationsToMove) = 0;
6064 
6065  virtual VkDeviceSize GetBytesMoved() const = 0;
6066  virtual uint32_t GetAllocationsMoved() const = 0;
6067 
6068 protected:
6069  VmaAllocator const m_hAllocator;
6070  VmaBlockVector* const m_pBlockVector;
6071  const uint32_t m_CurrentFrameIndex;
6072 
6073  struct AllocationInfo
6074  {
6075  VmaAllocation m_hAllocation;
6076  VkBool32* m_pChanged;
6077 
6078  AllocationInfo() :
6079  m_hAllocation(VK_NULL_HANDLE),
6080  m_pChanged(VMA_NULL)
6081  {
6082  }
6083  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6084  m_hAllocation(hAlloc),
6085  m_pChanged(pChanged)
6086  {
6087  }
6088  };
6089 };
6090 
6091 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6092 {
6093  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6094 public:
6095  VmaDefragmentationAlgorithm_Generic(
6096  VmaAllocator hAllocator,
6097  VmaBlockVector* pBlockVector,
6098  uint32_t currentFrameIndex,
6099  bool overlappingMoveSupported);
6100  virtual ~VmaDefragmentationAlgorithm_Generic();
6101 
6102  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6103  virtual void AddAll() { m_AllAllocations = true; }
6104 
6105  virtual VkResult Defragment(
6106  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6107  VkDeviceSize maxBytesToMove,
6108  uint32_t maxAllocationsToMove);
6109 
6110  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6111  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6112 
6113 private:
6114  uint32_t m_AllocationCount;
6115  bool m_AllAllocations;
6116 
6117  VkDeviceSize m_BytesMoved;
6118  uint32_t m_AllocationsMoved;
6119 
6120  struct AllocationInfoSizeGreater
6121  {
6122  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6123  {
6124  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6125  }
6126  };
6127 
6128  struct AllocationInfoOffsetGreater
6129  {
6130  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6131  {
6132  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6133  }
6134  };
6135 
6136  struct BlockInfo
6137  {
6138  size_t m_OriginalBlockIndex;
6139  VmaDeviceMemoryBlock* m_pBlock;
6140  bool m_HasNonMovableAllocations;
6141  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6142 
6143  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6144  m_OriginalBlockIndex(SIZE_MAX),
6145  m_pBlock(VMA_NULL),
6146  m_HasNonMovableAllocations(true),
6147  m_Allocations(pAllocationCallbacks)
6148  {
6149  }
6150 
6151  void CalcHasNonMovableAllocations()
6152  {
6153  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6154  const size_t defragmentAllocCount = m_Allocations.size();
6155  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6156  }
6157 
6158  void SortAllocationsBySizeDescending()
6159  {
6160  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6161  }
6162 
6163  void SortAllocationsByOffsetDescending()
6164  {
6165  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6166  }
6167  };
6168 
6169  struct BlockPointerLess
6170  {
6171  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6172  {
6173  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6174  }
6175  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6176  {
6177  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6178  }
6179  };
6180 
6181  // 1. Blocks with some non-movable allocations go first.
6182  // 2. Blocks with smaller sumFreeSize go first.
6183  struct BlockInfoCompareMoveDestination
6184  {
6185  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6186  {
6187  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6188  {
6189  return true;
6190  }
6191  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6192  {
6193  return false;
6194  }
6195  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6196  {
6197  return true;
6198  }
6199  return false;
6200  }
6201  };
6202 
6203  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6204  BlockInfoVector m_Blocks;
6205 
6206  VkResult DefragmentRound(
6207  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6208  VkDeviceSize maxBytesToMove,
6209  uint32_t maxAllocationsToMove);
6210 
6211  size_t CalcBlocksWithNonMovableCount() const;
6212 
6213  static bool MoveMakesSense(
6214  size_t dstBlockIndex, VkDeviceSize dstOffset,
6215  size_t srcBlockIndex, VkDeviceSize srcOffset);
6216 };
6217 
6218 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6219 {
6220  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6221 public:
6222  VmaDefragmentationAlgorithm_Fast(
6223  VmaAllocator hAllocator,
6224  VmaBlockVector* pBlockVector,
6225  uint32_t currentFrameIndex,
6226  bool overlappingMoveSupported);
6227  virtual ~VmaDefragmentationAlgorithm_Fast();
6228 
6229  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6230  virtual void AddAll() { m_AllAllocations = true; }
6231 
6232  virtual VkResult Defragment(
6233  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6234  VkDeviceSize maxBytesToMove,
6235  uint32_t maxAllocationsToMove);
6236 
6237  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6238  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6239 
6240 private:
6241  struct BlockInfo
6242  {
6243  size_t origBlockIndex;
6244  };
6245 
6246  class FreeSpaceDatabase
6247  {
6248  public:
6249  FreeSpaceDatabase()
6250  {
6251  FreeSpace s = {};
6252  s.blockInfoIndex = SIZE_MAX;
6253  for(size_t i = 0; i < MAX_COUNT; ++i)
6254  {
6255  m_FreeSpaces[i] = s;
6256  }
6257  }
6258 
6259  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6260  {
6261  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6262  {
6263  return;
6264  }
6265 
6266  // Find first invalid or the smallest structure.
6267  size_t bestIndex = SIZE_MAX;
6268  for(size_t i = 0; i < MAX_COUNT; ++i)
6269  {
6270  // Empty structure.
6271  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6272  {
6273  bestIndex = i;
6274  break;
6275  }
6276  if(m_FreeSpaces[i].size < size &&
6277  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6278  {
6279  bestIndex = i;
6280  }
6281  }
6282 
6283  if(bestIndex != SIZE_MAX)
6284  {
6285  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6286  m_FreeSpaces[bestIndex].offset = offset;
6287  m_FreeSpaces[bestIndex].size = size;
6288  }
6289  }
6290 
6291  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6292  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6293  {
6294  size_t bestIndex = SIZE_MAX;
6295  VkDeviceSize bestFreeSpaceAfter = 0;
6296  for(size_t i = 0; i < MAX_COUNT; ++i)
6297  {
6298  // Structure is valid.
6299  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6300  {
6301  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6302  // Allocation fits into this structure.
6303  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6304  {
6305  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6306  (dstOffset + size);
6307  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6308  {
6309  bestIndex = i;
6310  bestFreeSpaceAfter = freeSpaceAfter;
6311  }
6312  }
6313  }
6314  }
6315 
6316  if(bestIndex != SIZE_MAX)
6317  {
6318  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6319  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6320 
6321  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6322  {
6323  // Leave this structure for remaining empty space.
6324  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6325  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6326  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6327  }
6328  else
6329  {
6330  // This structure becomes invalid.
6331  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6332  }
6333 
6334  return true;
6335  }
6336 
6337  return false;
6338  }
6339 
6340  private:
6341  static const size_t MAX_COUNT = 4;
6342 
6343  struct FreeSpace
6344  {
6345  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6346  VkDeviceSize offset;
6347  VkDeviceSize size;
6348  } m_FreeSpaces[MAX_COUNT];
6349  };
6350 
6351  const bool m_OverlappingMoveSupported;
6352 
6353  uint32_t m_AllocationCount;
6354  bool m_AllAllocations;
6355 
6356  VkDeviceSize m_BytesMoved;
6357  uint32_t m_AllocationsMoved;
6358 
6359  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6360 
6361  void PreprocessMetadata();
6362  void PostprocessMetadata();
6363  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6364 };
6365 
6366 struct VmaBlockDefragmentationContext
6367 {
6368  enum BLOCK_FLAG
6369  {
6370  BLOCK_FLAG_USED = 0x00000001,
6371  };
6372  uint32_t flags;
6373  VkBuffer hBuffer;
6374 
6375  VmaBlockDefragmentationContext() :
6376  flags(0),
6377  hBuffer(VK_NULL_HANDLE)
6378  {
6379  }
6380 };
6381 
6382 class VmaBlockVectorDefragmentationContext
6383 {
6384  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6385 public:
6386  VkResult res;
6387  bool mutexLocked;
6388  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6389 
6390  VmaBlockVectorDefragmentationContext(
6391  VmaAllocator hAllocator,
6392  VmaPool hCustomPool, // Optional.
6393  VmaBlockVector* pBlockVector,
6394  uint32_t currFrameIndex,
6395  uint32_t flags);
6396  ~VmaBlockVectorDefragmentationContext();
6397 
6398  VmaPool GetCustomPool() const { return m_hCustomPool; }
6399  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6400  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6401 
6402  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6403  void AddAll() { m_AllAllocations = true; }
6404 
6405  void Begin(bool overlappingMoveSupported);
6406 
6407 private:
6408  const VmaAllocator m_hAllocator;
6409  // Null if not from custom pool.
6410  const VmaPool m_hCustomPool;
6411  // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6412  VmaBlockVector* const m_pBlockVector;
6413  const uint32_t m_CurrFrameIndex;
6414  const uint32_t m_AlgorithmFlags;
6415  // Owner of this object.
6416  VmaDefragmentationAlgorithm* m_pAlgorithm;
6417 
6418  struct AllocInfo
6419  {
6420  VmaAllocation hAlloc;
6421  VkBool32* pChanged;
6422  };
6423  // Used between constructor and Begin.
6424  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6425  bool m_AllAllocations;
6426 };
6427 
6428 struct VmaDefragmentationContext_T
6429 {
6430 private:
6431  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6432 public:
6433  VmaDefragmentationContext_T(
6434  VmaAllocator hAllocator,
6435  uint32_t currFrameIndex,
6436  uint32_t flags,
6437  VmaDefragmentationStats* pStats);
6438  ~VmaDefragmentationContext_T();
6439 
6440  void AddPools(uint32_t poolCount, VmaPool* pPools);
6441  void AddAllocations(
6442  uint32_t allocationCount,
6443  VmaAllocation* pAllocations,
6444  VkBool32* pAllocationsChanged);
6445 
6446  /*
6447  Returns:
6448  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6449  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6450  - Negative value if error occured and object can be destroyed immediately.
6451  */
6452  VkResult Defragment(
6453  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6454  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6455  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6456 
6457 private:
6458  const VmaAllocator m_hAllocator;
6459  const uint32_t m_CurrFrameIndex;
6460  const uint32_t m_Flags;
6461  VmaDefragmentationStats* const m_pStats;
6462  // Owner of these objects.
6463  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6464  // Owner of these objects.
6465  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6466 };
6467 
6468 #if VMA_RECORDING_ENABLED
6469 
6470 class VmaRecorder
6471 {
6472 public:
6473  VmaRecorder();
6474  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6475  void WriteConfiguration(
6476  const VkPhysicalDeviceProperties& devProps,
6477  const VkPhysicalDeviceMemoryProperties& memProps,
6478  bool dedicatedAllocationExtensionEnabled);
6479  ~VmaRecorder();
6480 
6481  void RecordCreateAllocator(uint32_t frameIndex);
6482  void RecordDestroyAllocator(uint32_t frameIndex);
6483  void RecordCreatePool(uint32_t frameIndex,
6484  const VmaPoolCreateInfo& createInfo,
6485  VmaPool pool);
6486  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6487  void RecordAllocateMemory(uint32_t frameIndex,
6488  const VkMemoryRequirements& vkMemReq,
6489  const VmaAllocationCreateInfo& createInfo,
6490  VmaAllocation allocation);
6491  void RecordAllocateMemoryPages(uint32_t frameIndex,
6492  const VkMemoryRequirements& vkMemReq,
6493  const VmaAllocationCreateInfo& createInfo,
6494  uint64_t allocationCount,
6495  const VmaAllocation* pAllocations);
6496  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6497  const VkMemoryRequirements& vkMemReq,
6498  bool requiresDedicatedAllocation,
6499  bool prefersDedicatedAllocation,
6500  const VmaAllocationCreateInfo& createInfo,
6501  VmaAllocation allocation);
6502  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6503  const VkMemoryRequirements& vkMemReq,
6504  bool requiresDedicatedAllocation,
6505  bool prefersDedicatedAllocation,
6506  const VmaAllocationCreateInfo& createInfo,
6507  VmaAllocation allocation);
6508  void RecordFreeMemory(uint32_t frameIndex,
6509  VmaAllocation allocation);
6510  void RecordFreeMemoryPages(uint32_t frameIndex,
6511  uint64_t allocationCount,
6512  const VmaAllocation* pAllocations);
6513  void RecordResizeAllocation(
6514  uint32_t frameIndex,
6515  VmaAllocation allocation,
6516  VkDeviceSize newSize);
6517  void RecordSetAllocationUserData(uint32_t frameIndex,
6518  VmaAllocation allocation,
6519  const void* pUserData);
6520  void RecordCreateLostAllocation(uint32_t frameIndex,
6521  VmaAllocation allocation);
6522  void RecordMapMemory(uint32_t frameIndex,
6523  VmaAllocation allocation);
6524  void RecordUnmapMemory(uint32_t frameIndex,
6525  VmaAllocation allocation);
6526  void RecordFlushAllocation(uint32_t frameIndex,
6527  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6528  void RecordInvalidateAllocation(uint32_t frameIndex,
6529  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6530  void RecordCreateBuffer(uint32_t frameIndex,
6531  const VkBufferCreateInfo& bufCreateInfo,
6532  const VmaAllocationCreateInfo& allocCreateInfo,
6533  VmaAllocation allocation);
6534  void RecordCreateImage(uint32_t frameIndex,
6535  const VkImageCreateInfo& imageCreateInfo,
6536  const VmaAllocationCreateInfo& allocCreateInfo,
6537  VmaAllocation allocation);
6538  void RecordDestroyBuffer(uint32_t frameIndex,
6539  VmaAllocation allocation);
6540  void RecordDestroyImage(uint32_t frameIndex,
6541  VmaAllocation allocation);
6542  void RecordTouchAllocation(uint32_t frameIndex,
6543  VmaAllocation allocation);
6544  void RecordGetAllocationInfo(uint32_t frameIndex,
6545  VmaAllocation allocation);
6546  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6547  VmaPool pool);
6548  void RecordDefragmentationBegin(uint32_t frameIndex,
6549  const VmaDefragmentationInfo2& info,
6551  void RecordDefragmentationEnd(uint32_t frameIndex,
6553 
6554 private:
6555  struct CallParams
6556  {
6557  uint32_t threadId;
6558  double time;
6559  };
6560 
6561  class UserDataString
6562  {
6563  public:
6564  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6565  const char* GetString() const { return m_Str; }
6566 
6567  private:
6568  char m_PtrStr[17];
6569  const char* m_Str;
6570  };
6571 
6572  bool m_UseMutex;
6573  VmaRecordFlags m_Flags;
6574  FILE* m_File;
6575  VMA_MUTEX m_FileMutex;
6576  int64_t m_Freq;
6577  int64_t m_StartCounter;
6578 
6579  void GetBasicParams(CallParams& outParams);
6580 
6581  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6582  template<typename T>
6583  void PrintPointerList(uint64_t count, const T* pItems)
6584  {
6585  if(count)
6586  {
6587  fprintf(m_File, "%p", pItems[0]);
6588  for(uint64_t i = 1; i < count; ++i)
6589  {
6590  fprintf(m_File, " %p", pItems[i]);
6591  }
6592  }
6593  }
6594 
6595  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6596  void Flush();
6597 };
6598 
6599 #endif // #if VMA_RECORDING_ENABLED
6600 
6601 /*
6602 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6603 */
6604 class VmaAllocationObjectAllocator
6605 {
6606  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6607 public:
6608  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6609 
6610  VmaAllocation Allocate();
6611  void Free(VmaAllocation hAlloc);
6612 
6613 private:
6614  VMA_MUTEX m_Mutex;
6615  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6616 };
6617 
6618 // Main allocator object.
6619 struct VmaAllocator_T
6620 {
6621  VMA_CLASS_NO_COPY(VmaAllocator_T)
6622 public:
6623  bool m_UseMutex;
6624  bool m_UseKhrDedicatedAllocation;
6625  VkDevice m_hDevice;
6626  bool m_AllocationCallbacksSpecified;
6627  VkAllocationCallbacks m_AllocationCallbacks;
6628  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6629  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6630 
6631  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6632  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6633  VMA_MUTEX m_HeapSizeLimitMutex;
6634 
6635  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6636  VkPhysicalDeviceMemoryProperties m_MemProps;
6637 
6638  // Default pools.
6639  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6640 
6641  // Each vector is sorted by memory (handle value).
6642  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6643  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6644  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6645 
6646  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6647  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6648  ~VmaAllocator_T();
6649 
6650  const VkAllocationCallbacks* GetAllocationCallbacks() const
6651  {
6652  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6653  }
6654  const VmaVulkanFunctions& GetVulkanFunctions() const
6655  {
6656  return m_VulkanFunctions;
6657  }
6658 
6659  VkDeviceSize GetBufferImageGranularity() const
6660  {
6661  return VMA_MAX(
6662  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6663  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6664  }
6665 
6666  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6667  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6668 
6669  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6670  {
6671  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6672  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6673  }
6674  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6675  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6676  {
6677  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6678  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6679  }
6680  // Minimum alignment for all allocations in specific memory type.
6681  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6682  {
6683  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6684  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6685  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6686  }
6687 
6688  bool IsIntegratedGpu() const
6689  {
6690  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6691  }
6692 
6693 #if VMA_RECORDING_ENABLED
6694  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6695 #endif
6696 
6697  void GetBufferMemoryRequirements(
6698  VkBuffer hBuffer,
6699  VkMemoryRequirements& memReq,
6700  bool& requiresDedicatedAllocation,
6701  bool& prefersDedicatedAllocation) const;
6702  void GetImageMemoryRequirements(
6703  VkImage hImage,
6704  VkMemoryRequirements& memReq,
6705  bool& requiresDedicatedAllocation,
6706  bool& prefersDedicatedAllocation) const;
6707 
6708  // Main allocation function.
6709  VkResult AllocateMemory(
6710  const VkMemoryRequirements& vkMemReq,
6711  bool requiresDedicatedAllocation,
6712  bool prefersDedicatedAllocation,
6713  VkBuffer dedicatedBuffer,
6714  VkImage dedicatedImage,
6715  const VmaAllocationCreateInfo& createInfo,
6716  VmaSuballocationType suballocType,
6717  size_t allocationCount,
6718  VmaAllocation* pAllocations);
6719 
6720  // Main deallocation function.
6721  void FreeMemory(
6722  size_t allocationCount,
6723  const VmaAllocation* pAllocations);
6724 
6725  VkResult ResizeAllocation(
6726  const VmaAllocation alloc,
6727  VkDeviceSize newSize);
6728 
6729  void CalculateStats(VmaStats* pStats);
6730 
6731 #if VMA_STATS_STRING_ENABLED
6732  void PrintDetailedMap(class VmaJsonWriter& json);
6733 #endif
6734 
6735  VkResult DefragmentationBegin(
6736  const VmaDefragmentationInfo2& info,
6737  VmaDefragmentationStats* pStats,
6738  VmaDefragmentationContext* pContext);
6739  VkResult DefragmentationEnd(
6740  VmaDefragmentationContext context);
6741 
6742  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6743  bool TouchAllocation(VmaAllocation hAllocation);
6744 
6745  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6746  void DestroyPool(VmaPool pool);
6747  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6748 
6749  void SetCurrentFrameIndex(uint32_t frameIndex);
6750  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6751 
6752  void MakePoolAllocationsLost(
6753  VmaPool hPool,
6754  size_t* pLostAllocationCount);
6755  VkResult CheckPoolCorruption(VmaPool hPool);
6756  VkResult CheckCorruption(uint32_t memoryTypeBits);
6757 
6758  void CreateLostAllocation(VmaAllocation* pAllocation);
6759 
6760  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6761  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6762 
6763  VkResult Map(VmaAllocation hAllocation, void** ppData);
6764  void Unmap(VmaAllocation hAllocation);
6765 
6766  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6767  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6768 
6769  void FlushOrInvalidateAllocation(
6770  VmaAllocation hAllocation,
6771  VkDeviceSize offset, VkDeviceSize size,
6772  VMA_CACHE_OPERATION op);
6773 
6774  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6775 
6776 private:
6777  VkDeviceSize m_PreferredLargeHeapBlockSize;
6778 
6779  VkPhysicalDevice m_PhysicalDevice;
6780  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6781 
6782  VMA_RW_MUTEX m_PoolsMutex;
6783  // Protected by m_PoolsMutex. Sorted by pointer value.
6784  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6785  uint32_t m_NextPoolId;
6786 
6787  VmaVulkanFunctions m_VulkanFunctions;
6788 
6789 #if VMA_RECORDING_ENABLED
6790  VmaRecorder* m_pRecorder;
6791 #endif
6792 
6793  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6794 
6795  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6796 
6797  VkResult AllocateMemoryOfType(
6798  VkDeviceSize size,
6799  VkDeviceSize alignment,
6800  bool dedicatedAllocation,
6801  VkBuffer dedicatedBuffer,
6802  VkImage dedicatedImage,
6803  const VmaAllocationCreateInfo& createInfo,
6804  uint32_t memTypeIndex,
6805  VmaSuballocationType suballocType,
6806  size_t allocationCount,
6807  VmaAllocation* pAllocations);
6808 
6809  // Helper function only to be used inside AllocateDedicatedMemory.
6810  VkResult AllocateDedicatedMemoryPage(
6811  VkDeviceSize size,
6812  VmaSuballocationType suballocType,
6813  uint32_t memTypeIndex,
6814  const VkMemoryAllocateInfo& allocInfo,
6815  bool map,
6816  bool isUserDataString,
6817  void* pUserData,
6818  VmaAllocation* pAllocation);
6819 
6820  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6821  VkResult AllocateDedicatedMemory(
6822  VkDeviceSize size,
6823  VmaSuballocationType suballocType,
6824  uint32_t memTypeIndex,
6825  bool map,
6826  bool isUserDataString,
6827  void* pUserData,
6828  VkBuffer dedicatedBuffer,
6829  VkImage dedicatedImage,
6830  size_t allocationCount,
6831  VmaAllocation* pAllocations);
6832 
6833  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
6834  void FreeDedicatedMemory(VmaAllocation allocation);
6835 };
6836 
6838 // Memory allocation #2 after VmaAllocator_T definition
6839 
6840 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6841 {
6842  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6843 }
6844 
6845 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6846 {
6847  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6848 }
6849 
6850 template<typename T>
6851 static T* VmaAllocate(VmaAllocator hAllocator)
6852 {
6853  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6854 }
6855 
6856 template<typename T>
6857 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6858 {
6859  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6860 }
6861 
6862 template<typename T>
6863 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6864 {
6865  if(ptr != VMA_NULL)
6866  {
6867  ptr->~T();
6868  VmaFree(hAllocator, ptr);
6869  }
6870 }
6871 
6872 template<typename T>
6873 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6874 {
6875  if(ptr != VMA_NULL)
6876  {
6877  for(size_t i = count; i--; )
6878  ptr[i].~T();
6879  VmaFree(hAllocator, ptr);
6880  }
6881 }
6882 
6884 // VmaStringBuilder
6885 
6886 #if VMA_STATS_STRING_ENABLED
6887 
6888 class VmaStringBuilder
6889 {
6890 public:
6891  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6892  size_t GetLength() const { return m_Data.size(); }
6893  const char* GetData() const { return m_Data.data(); }
6894 
6895  void Add(char ch) { m_Data.push_back(ch); }
6896  void Add(const char* pStr);
6897  void AddNewLine() { Add('\n'); }
6898  void AddNumber(uint32_t num);
6899  void AddNumber(uint64_t num);
6900  void AddPointer(const void* ptr);
6901 
6902 private:
6903  VmaVector< char, VmaStlAllocator<char> > m_Data;
6904 };
6905 
6906 void VmaStringBuilder::Add(const char* pStr)
6907 {
6908  const size_t strLen = strlen(pStr);
6909  if(strLen > 0)
6910  {
6911  const size_t oldCount = m_Data.size();
6912  m_Data.resize(oldCount + strLen);
6913  memcpy(m_Data.data() + oldCount, pStr, strLen);
6914  }
6915 }
6916 
6917 void VmaStringBuilder::AddNumber(uint32_t num)
6918 {
6919  char buf[11];
6920  VmaUint32ToStr(buf, sizeof(buf), num);
6921  Add(buf);
6922 }
6923 
6924 void VmaStringBuilder::AddNumber(uint64_t num)
6925 {
6926  char buf[21];
6927  VmaUint64ToStr(buf, sizeof(buf), num);
6928  Add(buf);
6929 }
6930 
6931 void VmaStringBuilder::AddPointer(const void* ptr)
6932 {
6933  char buf[21];
6934  VmaPtrToStr(buf, sizeof(buf), ptr);
6935  Add(buf);
6936 }
6937 
6938 #endif // #if VMA_STATS_STRING_ENABLED
6939 
6941 // VmaJsonWriter
6942 
6943 #if VMA_STATS_STRING_ENABLED
6944 
6945 class VmaJsonWriter
6946 {
6947  VMA_CLASS_NO_COPY(VmaJsonWriter)
6948 public:
6949  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6950  ~VmaJsonWriter();
6951 
6952  void BeginObject(bool singleLine = false);
6953  void EndObject();
6954 
6955  void BeginArray(bool singleLine = false);
6956  void EndArray();
6957 
6958  void WriteString(const char* pStr);
6959  void BeginString(const char* pStr = VMA_NULL);
6960  void ContinueString(const char* pStr);
6961  void ContinueString(uint32_t n);
6962  void ContinueString(uint64_t n);
6963  void ContinueString_Pointer(const void* ptr);
6964  void EndString(const char* pStr = VMA_NULL);
6965 
6966  void WriteNumber(uint32_t n);
6967  void WriteNumber(uint64_t n);
6968  void WriteBool(bool b);
6969  void WriteNull();
6970 
6971 private:
6972  static const char* const INDENT;
6973 
6974  enum COLLECTION_TYPE
6975  {
6976  COLLECTION_TYPE_OBJECT,
6977  COLLECTION_TYPE_ARRAY,
6978  };
6979  struct StackItem
6980  {
6981  COLLECTION_TYPE type;
6982  uint32_t valueCount;
6983  bool singleLineMode;
6984  };
6985 
6986  VmaStringBuilder& m_SB;
6987  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6988  bool m_InsideString;
6989 
6990  void BeginValue(bool isString);
6991  void WriteIndent(bool oneLess = false);
6992 };
6993 
6994 const char* const VmaJsonWriter::INDENT = " ";
6995 
6996 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6997  m_SB(sb),
6998  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6999  m_InsideString(false)
7000 {
7001 }
7002 
7003 VmaJsonWriter::~VmaJsonWriter()
7004 {
7005  VMA_ASSERT(!m_InsideString);
7006  VMA_ASSERT(m_Stack.empty());
7007 }
7008 
7009 void VmaJsonWriter::BeginObject(bool singleLine)
7010 {
7011  VMA_ASSERT(!m_InsideString);
7012 
7013  BeginValue(false);
7014  m_SB.Add('{');
7015 
7016  StackItem item;
7017  item.type = COLLECTION_TYPE_OBJECT;
7018  item.valueCount = 0;
7019  item.singleLineMode = singleLine;
7020  m_Stack.push_back(item);
7021 }
7022 
7023 void VmaJsonWriter::EndObject()
7024 {
7025  VMA_ASSERT(!m_InsideString);
7026 
7027  WriteIndent(true);
7028  m_SB.Add('}');
7029 
7030  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7031  m_Stack.pop_back();
7032 }
7033 
7034 void VmaJsonWriter::BeginArray(bool singleLine)
7035 {
7036  VMA_ASSERT(!m_InsideString);
7037 
7038  BeginValue(false);
7039  m_SB.Add('[');
7040 
7041  StackItem item;
7042  item.type = COLLECTION_TYPE_ARRAY;
7043  item.valueCount = 0;
7044  item.singleLineMode = singleLine;
7045  m_Stack.push_back(item);
7046 }
7047 
7048 void VmaJsonWriter::EndArray()
7049 {
7050  VMA_ASSERT(!m_InsideString);
7051 
7052  WriteIndent(true);
7053  m_SB.Add(']');
7054 
7055  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7056  m_Stack.pop_back();
7057 }
7058 
7059 void VmaJsonWriter::WriteString(const char* pStr)
7060 {
7061  BeginString(pStr);
7062  EndString();
7063 }
7064 
7065 void VmaJsonWriter::BeginString(const char* pStr)
7066 {
7067  VMA_ASSERT(!m_InsideString);
7068 
7069  BeginValue(true);
7070  m_SB.Add('"');
7071  m_InsideString = true;
7072  if(pStr != VMA_NULL && pStr[0] != '\0')
7073  {
7074  ContinueString(pStr);
7075  }
7076 }
7077 
7078 void VmaJsonWriter::ContinueString(const char* pStr)
7079 {
7080  VMA_ASSERT(m_InsideString);
7081 
7082  const size_t strLen = strlen(pStr);
7083  for(size_t i = 0; i < strLen; ++i)
7084  {
7085  char ch = pStr[i];
7086  if(ch == '\\')
7087  {
7088  m_SB.Add("\\\\");
7089  }
7090  else if(ch == '"')
7091  {
7092  m_SB.Add("\\\"");
7093  }
7094  else if(ch >= 32)
7095  {
7096  m_SB.Add(ch);
7097  }
7098  else switch(ch)
7099  {
7100  case '\b':
7101  m_SB.Add("\\b");
7102  break;
7103  case '\f':
7104  m_SB.Add("\\f");
7105  break;
7106  case '\n':
7107  m_SB.Add("\\n");
7108  break;
7109  case '\r':
7110  m_SB.Add("\\r");
7111  break;
7112  case '\t':
7113  m_SB.Add("\\t");
7114  break;
7115  default:
7116  VMA_ASSERT(0 && "Character not currently supported.");
7117  break;
7118  }
7119  }
7120 }
7121 
7122 void VmaJsonWriter::ContinueString(uint32_t n)
7123 {
7124  VMA_ASSERT(m_InsideString);
7125  m_SB.AddNumber(n);
7126 }
7127 
7128 void VmaJsonWriter::ContinueString(uint64_t n)
7129 {
7130  VMA_ASSERT(m_InsideString);
7131  m_SB.AddNumber(n);
7132 }
7133 
7134 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7135 {
7136  VMA_ASSERT(m_InsideString);
7137  m_SB.AddPointer(ptr);
7138 }
7139 
7140 void VmaJsonWriter::EndString(const char* pStr)
7141 {
7142  VMA_ASSERT(m_InsideString);
7143  if(pStr != VMA_NULL && pStr[0] != '\0')
7144  {
7145  ContinueString(pStr);
7146  }
7147  m_SB.Add('"');
7148  m_InsideString = false;
7149 }
7150 
7151 void VmaJsonWriter::WriteNumber(uint32_t n)
7152 {
7153  VMA_ASSERT(!m_InsideString);
7154  BeginValue(false);
7155  m_SB.AddNumber(n);
7156 }
7157 
7158 void VmaJsonWriter::WriteNumber(uint64_t n)
7159 {
7160  VMA_ASSERT(!m_InsideString);
7161  BeginValue(false);
7162  m_SB.AddNumber(n);
7163 }
7164 
7165 void VmaJsonWriter::WriteBool(bool b)
7166 {
7167  VMA_ASSERT(!m_InsideString);
7168  BeginValue(false);
7169  m_SB.Add(b ? "true" : "false");
7170 }
7171 
7172 void VmaJsonWriter::WriteNull()
7173 {
7174  VMA_ASSERT(!m_InsideString);
7175  BeginValue(false);
7176  m_SB.Add("null");
7177 }
7178 
7179 void VmaJsonWriter::BeginValue(bool isString)
7180 {
7181  if(!m_Stack.empty())
7182  {
7183  StackItem& currItem = m_Stack.back();
7184  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7185  currItem.valueCount % 2 == 0)
7186  {
7187  VMA_ASSERT(isString);
7188  }
7189 
7190  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7191  currItem.valueCount % 2 != 0)
7192  {
7193  m_SB.Add(": ");
7194  }
7195  else if(currItem.valueCount > 0)
7196  {
7197  m_SB.Add(", ");
7198  WriteIndent();
7199  }
7200  else
7201  {
7202  WriteIndent();
7203  }
7204  ++currItem.valueCount;
7205  }
7206 }
7207 
7208 void VmaJsonWriter::WriteIndent(bool oneLess)
7209 {
7210  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7211  {
7212  m_SB.AddNewLine();
7213 
7214  size_t count = m_Stack.size();
7215  if(count > 0 && oneLess)
7216  {
7217  --count;
7218  }
7219  for(size_t i = 0; i < count; ++i)
7220  {
7221  m_SB.Add(INDENT);
7222  }
7223  }
7224 }
7225 
7226 #endif // #if VMA_STATS_STRING_ENABLED
7227 
7229 
7230 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7231 {
7232  if(IsUserDataString())
7233  {
7234  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7235 
7236  FreeUserDataString(hAllocator);
7237 
7238  if(pUserData != VMA_NULL)
7239  {
7240  const char* const newStrSrc = (char*)pUserData;
7241  const size_t newStrLen = strlen(newStrSrc);
7242  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7243  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7244  m_pUserData = newStrDst;
7245  }
7246  }
7247  else
7248  {
7249  m_pUserData = pUserData;
7250  }
7251 }
7252 
7253 void VmaAllocation_T::ChangeBlockAllocation(
7254  VmaAllocator hAllocator,
7255  VmaDeviceMemoryBlock* block,
7256  VkDeviceSize offset)
7257 {
7258  VMA_ASSERT(block != VMA_NULL);
7259  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7260 
7261  // Move mapping reference counter from old block to new block.
7262  if(block != m_BlockAllocation.m_Block)
7263  {
7264  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7265  if(IsPersistentMap())
7266  ++mapRefCount;
7267  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7268  block->Map(hAllocator, mapRefCount, VMA_NULL);
7269  }
7270 
7271  m_BlockAllocation.m_Block = block;
7272  m_BlockAllocation.m_Offset = offset;
7273 }
7274 
7275 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7276 {
7277  VMA_ASSERT(newSize > 0);
7278  m_Size = newSize;
7279 }
7280 
7281 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7282 {
7283  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7284  m_BlockAllocation.m_Offset = newOffset;
7285 }
7286 
7287 VkDeviceSize VmaAllocation_T::GetOffset() const
7288 {
7289  switch(m_Type)
7290  {
7291  case ALLOCATION_TYPE_BLOCK:
7292  return m_BlockAllocation.m_Offset;
7293  case ALLOCATION_TYPE_DEDICATED:
7294  return 0;
7295  default:
7296  VMA_ASSERT(0);
7297  return 0;
7298  }
7299 }
7300 
7301 VkDeviceMemory VmaAllocation_T::GetMemory() const
7302 {
7303  switch(m_Type)
7304  {
7305  case ALLOCATION_TYPE_BLOCK:
7306  return m_BlockAllocation.m_Block->GetDeviceMemory();
7307  case ALLOCATION_TYPE_DEDICATED:
7308  return m_DedicatedAllocation.m_hMemory;
7309  default:
7310  VMA_ASSERT(0);
7311  return VK_NULL_HANDLE;
7312  }
7313 }
7314 
7315 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7316 {
7317  switch(m_Type)
7318  {
7319  case ALLOCATION_TYPE_BLOCK:
7320  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7321  case ALLOCATION_TYPE_DEDICATED:
7322  return m_DedicatedAllocation.m_MemoryTypeIndex;
7323  default:
7324  VMA_ASSERT(0);
7325  return UINT32_MAX;
7326  }
7327 }
7328 
7329 void* VmaAllocation_T::GetMappedData() const
7330 {
7331  switch(m_Type)
7332  {
7333  case ALLOCATION_TYPE_BLOCK:
7334  if(m_MapCount != 0)
7335  {
7336  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7337  VMA_ASSERT(pBlockData != VMA_NULL);
7338  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7339  }
7340  else
7341  {
7342  return VMA_NULL;
7343  }
7344  break;
7345  case ALLOCATION_TYPE_DEDICATED:
7346  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7347  return m_DedicatedAllocation.m_pMappedData;
7348  default:
7349  VMA_ASSERT(0);
7350  return VMA_NULL;
7351  }
7352 }
7353 
7354 bool VmaAllocation_T::CanBecomeLost() const
7355 {
7356  switch(m_Type)
7357  {
7358  case ALLOCATION_TYPE_BLOCK:
7359  return m_BlockAllocation.m_CanBecomeLost;
7360  case ALLOCATION_TYPE_DEDICATED:
7361  return false;
7362  default:
7363  VMA_ASSERT(0);
7364  return false;
7365  }
7366 }
7367 
7368 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7369 {
7370  VMA_ASSERT(CanBecomeLost());
7371 
7372  /*
7373  Warning: This is a carefully designed algorithm.
7374  Do not modify unless you really know what you're doing :)
7375  */
7376  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7377  for(;;)
7378  {
7379  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7380  {
7381  VMA_ASSERT(0);
7382  return false;
7383  }
7384  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7385  {
7386  return false;
7387  }
7388  else // Last use time earlier than current time.
7389  {
7390  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7391  {
7392  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7393  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7394  return true;
7395  }
7396  }
7397  }
7398 }
7399 
7400 #if VMA_STATS_STRING_ENABLED
7401 
7402 // Correspond to values of enum VmaSuballocationType.
7403 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7404  "FREE",
7405  "UNKNOWN",
7406  "BUFFER",
7407  "IMAGE_UNKNOWN",
7408  "IMAGE_LINEAR",
7409  "IMAGE_OPTIMAL",
7410 };
7411 
7412 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7413 {
7414  json.WriteString("Type");
7415  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7416 
7417  json.WriteString("Size");
7418  json.WriteNumber(m_Size);
7419 
7420  if(m_pUserData != VMA_NULL)
7421  {
7422  json.WriteString("UserData");
7423  if(IsUserDataString())
7424  {
7425  json.WriteString((const char*)m_pUserData);
7426  }
7427  else
7428  {
7429  json.BeginString();
7430  json.ContinueString_Pointer(m_pUserData);
7431  json.EndString();
7432  }
7433  }
7434 
7435  json.WriteString("CreationFrameIndex");
7436  json.WriteNumber(m_CreationFrameIndex);
7437 
7438  json.WriteString("LastUseFrameIndex");
7439  json.WriteNumber(GetLastUseFrameIndex());
7440 
7441  if(m_BufferImageUsage != 0)
7442  {
7443  json.WriteString("Usage");
7444  json.WriteNumber(m_BufferImageUsage);
7445  }
7446 }
7447 
7448 #endif
7449 
7450 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7451 {
7452  VMA_ASSERT(IsUserDataString());
7453  if(m_pUserData != VMA_NULL)
7454  {
7455  char* const oldStr = (char*)m_pUserData;
7456  const size_t oldStrLen = strlen(oldStr);
7457  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7458  m_pUserData = VMA_NULL;
7459  }
7460 }
7461 
7462 void VmaAllocation_T::BlockAllocMap()
7463 {
7464  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7465 
7466  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7467  {
7468  ++m_MapCount;
7469  }
7470  else
7471  {
7472  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7473  }
7474 }
7475 
7476 void VmaAllocation_T::BlockAllocUnmap()
7477 {
7478  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7479 
7480  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7481  {
7482  --m_MapCount;
7483  }
7484  else
7485  {
7486  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7487  }
7488 }
7489 
7490 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7491 {
7492  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7493 
7494  if(m_MapCount != 0)
7495  {
7496  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7497  {
7498  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7499  *ppData = m_DedicatedAllocation.m_pMappedData;
7500  ++m_MapCount;
7501  return VK_SUCCESS;
7502  }
7503  else
7504  {
7505  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7506  return VK_ERROR_MEMORY_MAP_FAILED;
7507  }
7508  }
7509  else
7510  {
7511  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7512  hAllocator->m_hDevice,
7513  m_DedicatedAllocation.m_hMemory,
7514  0, // offset
7515  VK_WHOLE_SIZE,
7516  0, // flags
7517  ppData);
7518  if(result == VK_SUCCESS)
7519  {
7520  m_DedicatedAllocation.m_pMappedData = *ppData;
7521  m_MapCount = 1;
7522  }
7523  return result;
7524  }
7525 }
7526 
7527 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7528 {
7529  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7530 
7531  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7532  {
7533  --m_MapCount;
7534  if(m_MapCount == 0)
7535  {
7536  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7537  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7538  hAllocator->m_hDevice,
7539  m_DedicatedAllocation.m_hMemory);
7540  }
7541  }
7542  else
7543  {
7544  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7545  }
7546 }
7547 
7548 #if VMA_STATS_STRING_ENABLED
7549 
7550 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7551 {
7552  json.BeginObject();
7553 
7554  json.WriteString("Blocks");
7555  json.WriteNumber(stat.blockCount);
7556 
7557  json.WriteString("Allocations");
7558  json.WriteNumber(stat.allocationCount);
7559 
7560  json.WriteString("UnusedRanges");
7561  json.WriteNumber(stat.unusedRangeCount);
7562 
7563  json.WriteString("UsedBytes");
7564  json.WriteNumber(stat.usedBytes);
7565 
7566  json.WriteString("UnusedBytes");
7567  json.WriteNumber(stat.unusedBytes);
7568 
7569  if(stat.allocationCount > 1)
7570  {
7571  json.WriteString("AllocationSize");
7572  json.BeginObject(true);
7573  json.WriteString("Min");
7574  json.WriteNumber(stat.allocationSizeMin);
7575  json.WriteString("Avg");
7576  json.WriteNumber(stat.allocationSizeAvg);
7577  json.WriteString("Max");
7578  json.WriteNumber(stat.allocationSizeMax);
7579  json.EndObject();
7580  }
7581 
7582  if(stat.unusedRangeCount > 1)
7583  {
7584  json.WriteString("UnusedRangeSize");
7585  json.BeginObject(true);
7586  json.WriteString("Min");
7587  json.WriteNumber(stat.unusedRangeSizeMin);
7588  json.WriteString("Avg");
7589  json.WriteNumber(stat.unusedRangeSizeAvg);
7590  json.WriteString("Max");
7591  json.WriteNumber(stat.unusedRangeSizeMax);
7592  json.EndObject();
7593  }
7594 
7595  json.EndObject();
7596 }
7597 
7598 #endif // #if VMA_STATS_STRING_ENABLED
7599 
7600 struct VmaSuballocationItemSizeLess
7601 {
7602  bool operator()(
7603  const VmaSuballocationList::iterator lhs,
7604  const VmaSuballocationList::iterator rhs) const
7605  {
7606  return lhs->size < rhs->size;
7607  }
7608  bool operator()(
7609  const VmaSuballocationList::iterator lhs,
7610  VkDeviceSize rhsSize) const
7611  {
7612  return lhs->size < rhsSize;
7613  }
7614 };
7615 
7616 
7618 // class VmaBlockMetadata
7619 
7620 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7621  m_Size(0),
7622  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7623 {
7624 }
7625 
7626 #if VMA_STATS_STRING_ENABLED
7627 
7628 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7629  VkDeviceSize unusedBytes,
7630  size_t allocationCount,
7631  size_t unusedRangeCount) const
7632 {
7633  json.BeginObject();
7634 
7635  json.WriteString("TotalBytes");
7636  json.WriteNumber(GetSize());
7637 
7638  json.WriteString("UnusedBytes");
7639  json.WriteNumber(unusedBytes);
7640 
7641  json.WriteString("Allocations");
7642  json.WriteNumber((uint64_t)allocationCount);
7643 
7644  json.WriteString("UnusedRanges");
7645  json.WriteNumber((uint64_t)unusedRangeCount);
7646 
7647  json.WriteString("Suballocations");
7648  json.BeginArray();
7649 }
7650 
7651 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7652  VkDeviceSize offset,
7653  VmaAllocation hAllocation) const
7654 {
7655  json.BeginObject(true);
7656 
7657  json.WriteString("Offset");
7658  json.WriteNumber(offset);
7659 
7660  hAllocation->PrintParameters(json);
7661 
7662  json.EndObject();
7663 }
7664 
7665 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7666  VkDeviceSize offset,
7667  VkDeviceSize size) const
7668 {
7669  json.BeginObject(true);
7670 
7671  json.WriteString("Offset");
7672  json.WriteNumber(offset);
7673 
7674  json.WriteString("Type");
7675  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7676 
7677  json.WriteString("Size");
7678  json.WriteNumber(size);
7679 
7680  json.EndObject();
7681 }
7682 
7683 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7684 {
7685  json.EndArray();
7686  json.EndObject();
7687 }
7688 
7689 #endif // #if VMA_STATS_STRING_ENABLED
7690 
7692 // class VmaBlockMetadata_Generic
7693 
7694 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7695  VmaBlockMetadata(hAllocator),
7696  m_FreeCount(0),
7697  m_SumFreeSize(0),
7698  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7699  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7700 {
7701 }
7702 
7703 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7704 {
7705 }
7706 
7707 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7708 {
7709  VmaBlockMetadata::Init(size);
7710 
7711  m_FreeCount = 1;
7712  m_SumFreeSize = size;
7713 
7714  VmaSuballocation suballoc = {};
7715  suballoc.offset = 0;
7716  suballoc.size = size;
7717  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7718  suballoc.hAllocation = VK_NULL_HANDLE;
7719 
7720  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7721  m_Suballocations.push_back(suballoc);
7722  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7723  --suballocItem;
7724  m_FreeSuballocationsBySize.push_back(suballocItem);
7725 }
7726 
7727 bool VmaBlockMetadata_Generic::Validate() const
7728 {
7729  VMA_VALIDATE(!m_Suballocations.empty());
7730 
7731  // Expected offset of new suballocation as calculated from previous ones.
7732  VkDeviceSize calculatedOffset = 0;
7733  // Expected number of free suballocations as calculated from traversing their list.
7734  uint32_t calculatedFreeCount = 0;
7735  // Expected sum size of free suballocations as calculated from traversing their list.
7736  VkDeviceSize calculatedSumFreeSize = 0;
7737  // Expected number of free suballocations that should be registered in
7738  // m_FreeSuballocationsBySize calculated from traversing their list.
7739  size_t freeSuballocationsToRegister = 0;
7740  // True if previous visited suballocation was free.
7741  bool prevFree = false;
7742 
7743  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7744  suballocItem != m_Suballocations.cend();
7745  ++suballocItem)
7746  {
7747  const VmaSuballocation& subAlloc = *suballocItem;
7748 
7749  // Actual offset of this suballocation doesn't match expected one.
7750  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7751 
7752  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7753  // Two adjacent free suballocations are invalid. They should be merged.
7754  VMA_VALIDATE(!prevFree || !currFree);
7755 
7756  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7757 
7758  if(currFree)
7759  {
7760  calculatedSumFreeSize += subAlloc.size;
7761  ++calculatedFreeCount;
7762  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7763  {
7764  ++freeSuballocationsToRegister;
7765  }
7766 
7767  // Margin required between allocations - every free space must be at least that large.
7768  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7769  }
7770  else
7771  {
7772  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7773  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7774 
7775  // Margin required between allocations - previous allocation must be free.
7776  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7777  }
7778 
7779  calculatedOffset += subAlloc.size;
7780  prevFree = currFree;
7781  }
7782 
7783  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7784  // match expected one.
7785  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7786 
7787  VkDeviceSize lastSize = 0;
7788  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7789  {
7790  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7791 
7792  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7793  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7794  // They must be sorted by size ascending.
7795  VMA_VALIDATE(suballocItem->size >= lastSize);
7796 
7797  lastSize = suballocItem->size;
7798  }
7799 
7800  // Check if totals match calculacted values.
7801  VMA_VALIDATE(ValidateFreeSuballocationList());
7802  VMA_VALIDATE(calculatedOffset == GetSize());
7803  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7804  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7805 
7806  return true;
7807 }
7808 
7809 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7810 {
7811  if(!m_FreeSuballocationsBySize.empty())
7812  {
7813  return m_FreeSuballocationsBySize.back()->size;
7814  }
7815  else
7816  {
7817  return 0;
7818  }
7819 }
7820 
7821 bool VmaBlockMetadata_Generic::IsEmpty() const
7822 {
7823  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7824 }
7825 
7826 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7827 {
7828  outInfo.blockCount = 1;
7829 
7830  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7831  outInfo.allocationCount = rangeCount - m_FreeCount;
7832  outInfo.unusedRangeCount = m_FreeCount;
7833 
7834  outInfo.unusedBytes = m_SumFreeSize;
7835  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7836 
7837  outInfo.allocationSizeMin = UINT64_MAX;
7838  outInfo.allocationSizeMax = 0;
7839  outInfo.unusedRangeSizeMin = UINT64_MAX;
7840  outInfo.unusedRangeSizeMax = 0;
7841 
7842  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7843  suballocItem != m_Suballocations.cend();
7844  ++suballocItem)
7845  {
7846  const VmaSuballocation& suballoc = *suballocItem;
7847  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7848  {
7849  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7850  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7851  }
7852  else
7853  {
7854  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7855  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7856  }
7857  }
7858 }
7859 
7860 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7861 {
7862  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7863 
7864  inoutStats.size += GetSize();
7865  inoutStats.unusedSize += m_SumFreeSize;
7866  inoutStats.allocationCount += rangeCount - m_FreeCount;
7867  inoutStats.unusedRangeCount += m_FreeCount;
7868  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7869 }
7870 
7871 #if VMA_STATS_STRING_ENABLED
7872 
7873 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7874 {
7875  PrintDetailedMap_Begin(json,
7876  m_SumFreeSize, // unusedBytes
7877  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7878  m_FreeCount); // unusedRangeCount
7879 
7880  size_t i = 0;
7881  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7882  suballocItem != m_Suballocations.cend();
7883  ++suballocItem, ++i)
7884  {
7885  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7886  {
7887  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7888  }
7889  else
7890  {
7891  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7892  }
7893  }
7894 
7895  PrintDetailedMap_End(json);
7896 }
7897 
7898 #endif // #if VMA_STATS_STRING_ENABLED
7899 
7900 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7901  uint32_t currentFrameIndex,
7902  uint32_t frameInUseCount,
7903  VkDeviceSize bufferImageGranularity,
7904  VkDeviceSize allocSize,
7905  VkDeviceSize allocAlignment,
7906  bool upperAddress,
7907  VmaSuballocationType allocType,
7908  bool canMakeOtherLost,
7909  uint32_t strategy,
7910  VmaAllocationRequest* pAllocationRequest)
7911 {
7912  VMA_ASSERT(allocSize > 0);
7913  VMA_ASSERT(!upperAddress);
7914  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7915  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7916  VMA_HEAVY_ASSERT(Validate());
7917 
7918  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7919 
7920  // There is not enough total free space in this block to fullfill the request: Early return.
7921  if(canMakeOtherLost == false &&
7922  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7923  {
7924  return false;
7925  }
7926 
7927  // New algorithm, efficiently searching freeSuballocationsBySize.
7928  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7929  if(freeSuballocCount > 0)
7930  {
7932  {
7933  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7934  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7935  m_FreeSuballocationsBySize.data(),
7936  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7937  allocSize + 2 * VMA_DEBUG_MARGIN,
7938  VmaSuballocationItemSizeLess());
7939  size_t index = it - m_FreeSuballocationsBySize.data();
7940  for(; index < freeSuballocCount; ++index)
7941  {
7942  if(CheckAllocation(
7943  currentFrameIndex,
7944  frameInUseCount,
7945  bufferImageGranularity,
7946  allocSize,
7947  allocAlignment,
7948  allocType,
7949  m_FreeSuballocationsBySize[index],
7950  false, // canMakeOtherLost
7951  &pAllocationRequest->offset,
7952  &pAllocationRequest->itemsToMakeLostCount,
7953  &pAllocationRequest->sumFreeSize,
7954  &pAllocationRequest->sumItemSize))
7955  {
7956  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7957  return true;
7958  }
7959  }
7960  }
7961  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7962  {
7963  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7964  it != m_Suballocations.end();
7965  ++it)
7966  {
7967  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7968  currentFrameIndex,
7969  frameInUseCount,
7970  bufferImageGranularity,
7971  allocSize,
7972  allocAlignment,
7973  allocType,
7974  it,
7975  false, // canMakeOtherLost
7976  &pAllocationRequest->offset,
7977  &pAllocationRequest->itemsToMakeLostCount,
7978  &pAllocationRequest->sumFreeSize,
7979  &pAllocationRequest->sumItemSize))
7980  {
7981  pAllocationRequest->item = it;
7982  return true;
7983  }
7984  }
7985  }
7986  else // WORST_FIT, FIRST_FIT
7987  {
7988  // Search staring from biggest suballocations.
7989  for(size_t index = freeSuballocCount; index--; )
7990  {
7991  if(CheckAllocation(
7992  currentFrameIndex,
7993  frameInUseCount,
7994  bufferImageGranularity,
7995  allocSize,
7996  allocAlignment,
7997  allocType,
7998  m_FreeSuballocationsBySize[index],
7999  false, // canMakeOtherLost
8000  &pAllocationRequest->offset,
8001  &pAllocationRequest->itemsToMakeLostCount,
8002  &pAllocationRequest->sumFreeSize,
8003  &pAllocationRequest->sumItemSize))
8004  {
8005  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8006  return true;
8007  }
8008  }
8009  }
8010  }
8011 
8012  if(canMakeOtherLost)
8013  {
8014  // Brute-force algorithm. TODO: Come up with something better.
8015 
8016  bool found = false;
8017  VmaAllocationRequest tmpAllocRequest = {};
8018  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8019  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8020  suballocIt != m_Suballocations.end();
8021  ++suballocIt)
8022  {
8023  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8024  suballocIt->hAllocation->CanBecomeLost())
8025  {
8026  if(CheckAllocation(
8027  currentFrameIndex,
8028  frameInUseCount,
8029  bufferImageGranularity,
8030  allocSize,
8031  allocAlignment,
8032  allocType,
8033  suballocIt,
8034  canMakeOtherLost,
8035  &tmpAllocRequest.offset,
8036  &tmpAllocRequest.itemsToMakeLostCount,
8037  &tmpAllocRequest.sumFreeSize,
8038  &tmpAllocRequest.sumItemSize))
8039  {
8041  {
8042  *pAllocationRequest = tmpAllocRequest;
8043  pAllocationRequest->item = suballocIt;
8044  break;
8045  }
8046  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8047  {
8048  *pAllocationRequest = tmpAllocRequest;
8049  pAllocationRequest->item = suballocIt;
8050  found = true;
8051  }
8052  }
8053  }
8054  }
8055 
8056  return found;
8057  }
8058 
8059  return false;
8060 }
8061 
8062 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8063  uint32_t currentFrameIndex,
8064  uint32_t frameInUseCount,
8065  VmaAllocationRequest* pAllocationRequest)
8066 {
8067  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8068 
8069  while(pAllocationRequest->itemsToMakeLostCount > 0)
8070  {
8071  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8072  {
8073  ++pAllocationRequest->item;
8074  }
8075  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8076  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8077  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8078  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8079  {
8080  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8081  --pAllocationRequest->itemsToMakeLostCount;
8082  }
8083  else
8084  {
8085  return false;
8086  }
8087  }
8088 
8089  VMA_HEAVY_ASSERT(Validate());
8090  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8091  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8092 
8093  return true;
8094 }
8095 
8096 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8097 {
8098  uint32_t lostAllocationCount = 0;
8099  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8100  it != m_Suballocations.end();
8101  ++it)
8102  {
8103  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8104  it->hAllocation->CanBecomeLost() &&
8105  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8106  {
8107  it = FreeSuballocation(it);
8108  ++lostAllocationCount;
8109  }
8110  }
8111  return lostAllocationCount;
8112 }
8113 
8114 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8115 {
8116  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8117  it != m_Suballocations.end();
8118  ++it)
8119  {
8120  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8121  {
8122  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8123  {
8124  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8125  return VK_ERROR_VALIDATION_FAILED_EXT;
8126  }
8127  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8128  {
8129  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8130  return VK_ERROR_VALIDATION_FAILED_EXT;
8131  }
8132  }
8133  }
8134 
8135  return VK_SUCCESS;
8136 }
8137 
8138 void VmaBlockMetadata_Generic::Alloc(
8139  const VmaAllocationRequest& request,
8140  VmaSuballocationType type,
8141  VkDeviceSize allocSize,
8142  VmaAllocation hAllocation)
8143 {
8144  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8145  VMA_ASSERT(request.item != m_Suballocations.end());
8146  VmaSuballocation& suballoc = *request.item;
8147  // Given suballocation is a free block.
8148  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8149  // Given offset is inside this suballocation.
8150  VMA_ASSERT(request.offset >= suballoc.offset);
8151  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8152  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8153  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8154 
8155  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8156  // it to become used.
8157  UnregisterFreeSuballocation(request.item);
8158 
8159  suballoc.offset = request.offset;
8160  suballoc.size = allocSize;
8161  suballoc.type = type;
8162  suballoc.hAllocation = hAllocation;
8163 
8164  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8165  if(paddingEnd)
8166  {
8167  VmaSuballocation paddingSuballoc = {};
8168  paddingSuballoc.offset = request.offset + allocSize;
8169  paddingSuballoc.size = paddingEnd;
8170  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8171  VmaSuballocationList::iterator next = request.item;
8172  ++next;
8173  const VmaSuballocationList::iterator paddingEndItem =
8174  m_Suballocations.insert(next, paddingSuballoc);
8175  RegisterFreeSuballocation(paddingEndItem);
8176  }
8177 
8178  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8179  if(paddingBegin)
8180  {
8181  VmaSuballocation paddingSuballoc = {};
8182  paddingSuballoc.offset = request.offset - paddingBegin;
8183  paddingSuballoc.size = paddingBegin;
8184  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8185  const VmaSuballocationList::iterator paddingBeginItem =
8186  m_Suballocations.insert(request.item, paddingSuballoc);
8187  RegisterFreeSuballocation(paddingBeginItem);
8188  }
8189 
8190  // Update totals.
8191  m_FreeCount = m_FreeCount - 1;
8192  if(paddingBegin > 0)
8193  {
8194  ++m_FreeCount;
8195  }
8196  if(paddingEnd > 0)
8197  {
8198  ++m_FreeCount;
8199  }
8200  m_SumFreeSize -= allocSize;
8201 }
8202 
8203 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8204 {
8205  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8206  suballocItem != m_Suballocations.end();
8207  ++suballocItem)
8208  {
8209  VmaSuballocation& suballoc = *suballocItem;
8210  if(suballoc.hAllocation == allocation)
8211  {
8212  FreeSuballocation(suballocItem);
8213  VMA_HEAVY_ASSERT(Validate());
8214  return;
8215  }
8216  }
8217  VMA_ASSERT(0 && "Not found!");
8218 }
8219 
8220 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8221 {
8222  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8223  suballocItem != m_Suballocations.end();
8224  ++suballocItem)
8225  {
8226  VmaSuballocation& suballoc = *suballocItem;
8227  if(suballoc.offset == offset)
8228  {
8229  FreeSuballocation(suballocItem);
8230  return;
8231  }
8232  }
8233  VMA_ASSERT(0 && "Not found!");
8234 }
8235 
8236 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8237 {
8238  typedef VmaSuballocationList::iterator iter_type;
8239  for(iter_type suballocItem = m_Suballocations.begin();
8240  suballocItem != m_Suballocations.end();
8241  ++suballocItem)
8242  {
8243  VmaSuballocation& suballoc = *suballocItem;
8244  if(suballoc.hAllocation == alloc)
8245  {
8246  iter_type nextItem = suballocItem;
8247  ++nextItem;
8248 
8249  // Should have been ensured on higher level.
8250  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8251 
8252  // Shrinking.
8253  if(newSize < alloc->GetSize())
8254  {
8255  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8256 
8257  // There is next item.
8258  if(nextItem != m_Suballocations.end())
8259  {
8260  // Next item is free.
8261  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8262  {
8263  // Grow this next item backward.
8264  UnregisterFreeSuballocation(nextItem);
8265  nextItem->offset -= sizeDiff;
8266  nextItem->size += sizeDiff;
8267  RegisterFreeSuballocation(nextItem);
8268  }
8269  // Next item is not free.
8270  else
8271  {
8272  // Create free item after current one.
8273  VmaSuballocation newFreeSuballoc;
8274  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8275  newFreeSuballoc.offset = suballoc.offset + newSize;
8276  newFreeSuballoc.size = sizeDiff;
8277  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8278  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8279  RegisterFreeSuballocation(newFreeSuballocIt);
8280 
8281  ++m_FreeCount;
8282  }
8283  }
8284  // This is the last item.
8285  else
8286  {
8287  // Create free item at the end.
8288  VmaSuballocation newFreeSuballoc;
8289  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8290  newFreeSuballoc.offset = suballoc.offset + newSize;
8291  newFreeSuballoc.size = sizeDiff;
8292  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8293  m_Suballocations.push_back(newFreeSuballoc);
8294 
8295  iter_type newFreeSuballocIt = m_Suballocations.end();
8296  RegisterFreeSuballocation(--newFreeSuballocIt);
8297 
8298  ++m_FreeCount;
8299  }
8300 
8301  suballoc.size = newSize;
8302  m_SumFreeSize += sizeDiff;
8303  }
8304  // Growing.
8305  else
8306  {
8307  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8308 
8309  // There is next item.
8310  if(nextItem != m_Suballocations.end())
8311  {
8312  // Next item is free.
8313  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8314  {
8315  // There is not enough free space, including margin.
8316  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8317  {
8318  return false;
8319  }
8320 
8321  // There is more free space than required.
8322  if(nextItem->size > sizeDiff)
8323  {
8324  // Move and shrink this next item.
8325  UnregisterFreeSuballocation(nextItem);
8326  nextItem->offset += sizeDiff;
8327  nextItem->size -= sizeDiff;
8328  RegisterFreeSuballocation(nextItem);
8329  }
8330  // There is exactly the amount of free space required.
8331  else
8332  {
8333  // Remove this next free item.
8334  UnregisterFreeSuballocation(nextItem);
8335  m_Suballocations.erase(nextItem);
8336  --m_FreeCount;
8337  }
8338  }
8339  // Next item is not free - there is no space to grow.
8340  else
8341  {
8342  return false;
8343  }
8344  }
8345  // This is the last item - there is no space to grow.
8346  else
8347  {
8348  return false;
8349  }
8350 
8351  suballoc.size = newSize;
8352  m_SumFreeSize -= sizeDiff;
8353  }
8354 
8355  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
8356  return true;
8357  }
8358  }
8359  VMA_ASSERT(0 && "Not found!");
8360  return false;
8361 }
8362 
8363 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8364 {
8365  VkDeviceSize lastSize = 0;
8366  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8367  {
8368  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8369 
8370  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8371  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8372  VMA_VALIDATE(it->size >= lastSize);
8373  lastSize = it->size;
8374  }
8375  return true;
8376 }
8377 
8378 bool VmaBlockMetadata_Generic::CheckAllocation(
8379  uint32_t currentFrameIndex,
8380  uint32_t frameInUseCount,
8381  VkDeviceSize bufferImageGranularity,
8382  VkDeviceSize allocSize,
8383  VkDeviceSize allocAlignment,
8384  VmaSuballocationType allocType,
8385  VmaSuballocationList::const_iterator suballocItem,
8386  bool canMakeOtherLost,
8387  VkDeviceSize* pOffset,
8388  size_t* itemsToMakeLostCount,
8389  VkDeviceSize* pSumFreeSize,
8390  VkDeviceSize* pSumItemSize) const
8391 {
8392  VMA_ASSERT(allocSize > 0);
8393  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8394  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8395  VMA_ASSERT(pOffset != VMA_NULL);
8396 
8397  *itemsToMakeLostCount = 0;
8398  *pSumFreeSize = 0;
8399  *pSumItemSize = 0;
8400 
8401  if(canMakeOtherLost)
8402  {
8403  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8404  {
8405  *pSumFreeSize = suballocItem->size;
8406  }
8407  else
8408  {
8409  if(suballocItem->hAllocation->CanBecomeLost() &&
8410  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8411  {
8412  ++*itemsToMakeLostCount;
8413  *pSumItemSize = suballocItem->size;
8414  }
8415  else
8416  {
8417  return false;
8418  }
8419  }
8420 
8421  // Remaining size is too small for this request: Early return.
8422  if(GetSize() - suballocItem->offset < allocSize)
8423  {
8424  return false;
8425  }
8426 
8427  // Start from offset equal to beginning of this suballocation.
8428  *pOffset = suballocItem->offset;
8429 
8430  // Apply VMA_DEBUG_MARGIN at the beginning.
8431  if(VMA_DEBUG_MARGIN > 0)
8432  {
8433  *pOffset += VMA_DEBUG_MARGIN;
8434  }
8435 
8436  // Apply alignment.
8437  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8438 
8439  // Check previous suballocations for BufferImageGranularity conflicts.
8440  // Make bigger alignment if necessary.
8441  if(bufferImageGranularity > 1)
8442  {
8443  bool bufferImageGranularityConflict = false;
8444  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8445  while(prevSuballocItem != m_Suballocations.cbegin())
8446  {
8447  --prevSuballocItem;
8448  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8449  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8450  {
8451  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8452  {
8453  bufferImageGranularityConflict = true;
8454  break;
8455  }
8456  }
8457  else
8458  // Already on previous page.
8459  break;
8460  }
8461  if(bufferImageGranularityConflict)
8462  {
8463  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8464  }
8465  }
8466 
8467  // Now that we have final *pOffset, check if we are past suballocItem.
8468  // If yes, return false - this function should be called for another suballocItem as starting point.
8469  if(*pOffset >= suballocItem->offset + suballocItem->size)
8470  {
8471  return false;
8472  }
8473 
8474  // Calculate padding at the beginning based on current offset.
8475  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8476 
8477  // Calculate required margin at the end.
8478  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8479 
8480  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8481  // Another early return check.
8482  if(suballocItem->offset + totalSize > GetSize())
8483  {
8484  return false;
8485  }
8486 
8487  // Advance lastSuballocItem until desired size is reached.
8488  // Update itemsToMakeLostCount.
8489  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8490  if(totalSize > suballocItem->size)
8491  {
8492  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8493  while(remainingSize > 0)
8494  {
8495  ++lastSuballocItem;
8496  if(lastSuballocItem == m_Suballocations.cend())
8497  {
8498  return false;
8499  }
8500  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8501  {
8502  *pSumFreeSize += lastSuballocItem->size;
8503  }
8504  else
8505  {
8506  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8507  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8508  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8509  {
8510  ++*itemsToMakeLostCount;
8511  *pSumItemSize += lastSuballocItem->size;
8512  }
8513  else
8514  {
8515  return false;
8516  }
8517  }
8518  remainingSize = (lastSuballocItem->size < remainingSize) ?
8519  remainingSize - lastSuballocItem->size : 0;
8520  }
8521  }
8522 
8523  // Check next suballocations for BufferImageGranularity conflicts.
8524  // If conflict exists, we must mark more allocations lost or fail.
8525  if(bufferImageGranularity > 1)
8526  {
8527  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8528  ++nextSuballocItem;
8529  while(nextSuballocItem != m_Suballocations.cend())
8530  {
8531  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8532  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8533  {
8534  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8535  {
8536  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8537  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8538  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8539  {
8540  ++*itemsToMakeLostCount;
8541  }
8542  else
8543  {
8544  return false;
8545  }
8546  }
8547  }
8548  else
8549  {
8550  // Already on next page.
8551  break;
8552  }
8553  ++nextSuballocItem;
8554  }
8555  }
8556  }
8557  else
8558  {
8559  const VmaSuballocation& suballoc = *suballocItem;
8560  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8561 
8562  *pSumFreeSize = suballoc.size;
8563 
8564  // Size of this suballocation is too small for this request: Early return.
8565  if(suballoc.size < allocSize)
8566  {
8567  return false;
8568  }
8569 
8570  // Start from offset equal to beginning of this suballocation.
8571  *pOffset = suballoc.offset;
8572 
8573  // Apply VMA_DEBUG_MARGIN at the beginning.
8574  if(VMA_DEBUG_MARGIN > 0)
8575  {
8576  *pOffset += VMA_DEBUG_MARGIN;
8577  }
8578 
8579  // Apply alignment.
8580  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8581 
8582  // Check previous suballocations for BufferImageGranularity conflicts.
8583  // Make bigger alignment if necessary.
8584  if(bufferImageGranularity > 1)
8585  {
8586  bool bufferImageGranularityConflict = false;
8587  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8588  while(prevSuballocItem != m_Suballocations.cbegin())
8589  {
8590  --prevSuballocItem;
8591  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8592  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8593  {
8594  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8595  {
8596  bufferImageGranularityConflict = true;
8597  break;
8598  }
8599  }
8600  else
8601  // Already on previous page.
8602  break;
8603  }
8604  if(bufferImageGranularityConflict)
8605  {
8606  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8607  }
8608  }
8609 
8610  // Calculate padding at the beginning based on current offset.
8611  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8612 
8613  // Calculate required margin at the end.
8614  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8615 
8616  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8617  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8618  {
8619  return false;
8620  }
8621 
8622  // Check next suballocations for BufferImageGranularity conflicts.
8623  // If conflict exists, allocation cannot be made here.
8624  if(bufferImageGranularity > 1)
8625  {
8626  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8627  ++nextSuballocItem;
8628  while(nextSuballocItem != m_Suballocations.cend())
8629  {
8630  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8631  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8632  {
8633  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8634  {
8635  return false;
8636  }
8637  }
8638  else
8639  {
8640  // Already on next page.
8641  break;
8642  }
8643  ++nextSuballocItem;
8644  }
8645  }
8646  }
8647 
8648  // All tests passed: Success. pOffset is already filled.
8649  return true;
8650 }
8651 
8652 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8653 {
8654  VMA_ASSERT(item != m_Suballocations.end());
8655  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8656 
8657  VmaSuballocationList::iterator nextItem = item;
8658  ++nextItem;
8659  VMA_ASSERT(nextItem != m_Suballocations.end());
8660  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8661 
8662  item->size += nextItem->size;
8663  --m_FreeCount;
8664  m_Suballocations.erase(nextItem);
8665 }
8666 
8667 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8668 {
8669  // Change this suballocation to be marked as free.
8670  VmaSuballocation& suballoc = *suballocItem;
8671  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8672  suballoc.hAllocation = VK_NULL_HANDLE;
8673 
8674  // Update totals.
8675  ++m_FreeCount;
8676  m_SumFreeSize += suballoc.size;
8677 
8678  // Merge with previous and/or next suballocation if it's also free.
8679  bool mergeWithNext = false;
8680  bool mergeWithPrev = false;
8681 
8682  VmaSuballocationList::iterator nextItem = suballocItem;
8683  ++nextItem;
8684  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8685  {
8686  mergeWithNext = true;
8687  }
8688 
8689  VmaSuballocationList::iterator prevItem = suballocItem;
8690  if(suballocItem != m_Suballocations.begin())
8691  {
8692  --prevItem;
8693  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8694  {
8695  mergeWithPrev = true;
8696  }
8697  }
8698 
8699  if(mergeWithNext)
8700  {
8701  UnregisterFreeSuballocation(nextItem);
8702  MergeFreeWithNext(suballocItem);
8703  }
8704 
8705  if(mergeWithPrev)
8706  {
8707  UnregisterFreeSuballocation(prevItem);
8708  MergeFreeWithNext(prevItem);
8709  RegisterFreeSuballocation(prevItem);
8710  return prevItem;
8711  }
8712  else
8713  {
8714  RegisterFreeSuballocation(suballocItem);
8715  return suballocItem;
8716  }
8717 }
8718 
8719 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8720 {
8721  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8722  VMA_ASSERT(item->size > 0);
8723 
8724  // You may want to enable this validation at the beginning or at the end of
8725  // this function, depending on what do you want to check.
8726  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8727 
8728  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8729  {
8730  if(m_FreeSuballocationsBySize.empty())
8731  {
8732  m_FreeSuballocationsBySize.push_back(item);
8733  }
8734  else
8735  {
8736  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8737  }
8738  }
8739 
8740  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8741 }
8742 
8743 
8744 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8745 {
8746  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8747  VMA_ASSERT(item->size > 0);
8748 
8749  // You may want to enable this validation at the beginning or at the end of
8750  // this function, depending on what do you want to check.
8751  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8752 
8753  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8754  {
8755  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8756  m_FreeSuballocationsBySize.data(),
8757  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8758  item,
8759  VmaSuballocationItemSizeLess());
8760  for(size_t index = it - m_FreeSuballocationsBySize.data();
8761  index < m_FreeSuballocationsBySize.size();
8762  ++index)
8763  {
8764  if(m_FreeSuballocationsBySize[index] == item)
8765  {
8766  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8767  return;
8768  }
8769  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8770  }
8771  VMA_ASSERT(0 && "Not found.");
8772  }
8773 
8774  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8775 }
8776 
8777 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8778  VkDeviceSize bufferImageGranularity,
8779  VmaSuballocationType& inOutPrevSuballocType) const
8780 {
8781  if(bufferImageGranularity == 1 || IsEmpty())
8782  {
8783  return false;
8784  }
8785 
8786  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8787  bool typeConflictFound = false;
8788  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8789  it != m_Suballocations.cend();
8790  ++it)
8791  {
8792  const VmaSuballocationType suballocType = it->type;
8793  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8794  {
8795  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8796  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8797  {
8798  typeConflictFound = true;
8799  }
8800  inOutPrevSuballocType = suballocType;
8801  }
8802  }
8803 
8804  return typeConflictFound || minAlignment >= bufferImageGranularity;
8805 }
8806 
8808 // class VmaBlockMetadata_Linear
8809 
8810 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8811  VmaBlockMetadata(hAllocator),
8812  m_SumFreeSize(0),
8813  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8814  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8815  m_1stVectorIndex(0),
8816  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8817  m_1stNullItemsBeginCount(0),
8818  m_1stNullItemsMiddleCount(0),
8819  m_2ndNullItemsCount(0)
8820 {
8821 }
8822 
8823 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8824 {
8825 }
8826 
8827 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8828 {
8829  VmaBlockMetadata::Init(size);
8830  m_SumFreeSize = size;
8831 }
8832 
8833 bool VmaBlockMetadata_Linear::Validate() const
8834 {
8835  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8836  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8837 
8838  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8839  VMA_VALIDATE(!suballocations1st.empty() ||
8840  suballocations2nd.empty() ||
8841  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8842 
8843  if(!suballocations1st.empty())
8844  {
8845  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
8846  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8847  // Null item at the end should be just pop_back().
8848  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8849  }
8850  if(!suballocations2nd.empty())
8851  {
8852  // Null item at the end should be just pop_back().
8853  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8854  }
8855 
8856  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8857  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8858 
8859  VkDeviceSize sumUsedSize = 0;
8860  const size_t suballoc1stCount = suballocations1st.size();
8861  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8862 
8863  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8864  {
8865  const size_t suballoc2ndCount = suballocations2nd.size();
8866  size_t nullItem2ndCount = 0;
8867  for(size_t i = 0; i < suballoc2ndCount; ++i)
8868  {
8869  const VmaSuballocation& suballoc = suballocations2nd[i];
8870  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8871 
8872  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8873  VMA_VALIDATE(suballoc.offset >= offset);
8874 
8875  if(!currFree)
8876  {
8877  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8878  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8879  sumUsedSize += suballoc.size;
8880  }
8881  else
8882  {
8883  ++nullItem2ndCount;
8884  }
8885 
8886  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8887  }
8888 
8889  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8890  }
8891 
8892  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8893  {
8894  const VmaSuballocation& suballoc = suballocations1st[i];
8895  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8896  suballoc.hAllocation == VK_NULL_HANDLE);
8897  }
8898 
8899  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8900 
8901  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8902  {
8903  const VmaSuballocation& suballoc = suballocations1st[i];
8904  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8905 
8906  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8907  VMA_VALIDATE(suballoc.offset >= offset);
8908  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8909 
8910  if(!currFree)
8911  {
8912  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8913  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8914  sumUsedSize += suballoc.size;
8915  }
8916  else
8917  {
8918  ++nullItem1stCount;
8919  }
8920 
8921  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8922  }
8923  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8924 
8925  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8926  {
8927  const size_t suballoc2ndCount = suballocations2nd.size();
8928  size_t nullItem2ndCount = 0;
8929  for(size_t i = suballoc2ndCount; i--; )
8930  {
8931  const VmaSuballocation& suballoc = suballocations2nd[i];
8932  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8933 
8934  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8935  VMA_VALIDATE(suballoc.offset >= offset);
8936 
8937  if(!currFree)
8938  {
8939  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8940  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8941  sumUsedSize += suballoc.size;
8942  }
8943  else
8944  {
8945  ++nullItem2ndCount;
8946  }
8947 
8948  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8949  }
8950 
8951  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8952  }
8953 
8954  VMA_VALIDATE(offset <= GetSize());
8955  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8956 
8957  return true;
8958 }
8959 
8960 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8961 {
8962  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8963  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8964 }
8965 
8966 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8967 {
8968  const VkDeviceSize size = GetSize();
8969 
8970  /*
8971  We don't consider gaps inside allocation vectors with freed allocations because
8972  they are not suitable for reuse in linear allocator. We consider only space that
8973  is available for new allocations.
8974  */
8975  if(IsEmpty())
8976  {
8977  return size;
8978  }
8979 
8980  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8981 
8982  switch(m_2ndVectorMode)
8983  {
8984  case SECOND_VECTOR_EMPTY:
8985  /*
8986  Available space is after end of 1st, as well as before beginning of 1st (which
8987  whould make it a ring buffer).
8988  */
8989  {
8990  const size_t suballocations1stCount = suballocations1st.size();
8991  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8992  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8993  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8994  return VMA_MAX(
8995  firstSuballoc.offset,
8996  size - (lastSuballoc.offset + lastSuballoc.size));
8997  }
8998  break;
8999 
9000  case SECOND_VECTOR_RING_BUFFER:
9001  /*
9002  Available space is only between end of 2nd and beginning of 1st.
9003  */
9004  {
9005  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9006  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9007  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9008  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9009  }
9010  break;
9011 
9012  case SECOND_VECTOR_DOUBLE_STACK:
9013  /*
9014  Available space is only between end of 1st and top of 2nd.
9015  */
9016  {
9017  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9018  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9019  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9020  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9021  }
9022  break;
9023 
9024  default:
9025  VMA_ASSERT(0);
9026  return 0;
9027  }
9028 }
9029 
9030 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9031 {
9032  const VkDeviceSize size = GetSize();
9033  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9034  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9035  const size_t suballoc1stCount = suballocations1st.size();
9036  const size_t suballoc2ndCount = suballocations2nd.size();
9037 
9038  outInfo.blockCount = 1;
9039  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9040  outInfo.unusedRangeCount = 0;
9041  outInfo.usedBytes = 0;
9042  outInfo.allocationSizeMin = UINT64_MAX;
9043  outInfo.allocationSizeMax = 0;
9044  outInfo.unusedRangeSizeMin = UINT64_MAX;
9045  outInfo.unusedRangeSizeMax = 0;
9046 
9047  VkDeviceSize lastOffset = 0;
9048 
9049  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9050  {
9051  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9052  size_t nextAlloc2ndIndex = 0;
9053  while(lastOffset < freeSpace2ndTo1stEnd)
9054  {
9055  // Find next non-null allocation or move nextAllocIndex to the end.
9056  while(nextAlloc2ndIndex < suballoc2ndCount &&
9057  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9058  {
9059  ++nextAlloc2ndIndex;
9060  }
9061 
9062  // Found non-null allocation.
9063  if(nextAlloc2ndIndex < suballoc2ndCount)
9064  {
9065  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9066 
9067  // 1. Process free space before this allocation.
9068  if(lastOffset < suballoc.offset)
9069  {
9070  // There is free space from lastOffset to suballoc.offset.
9071  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9072  ++outInfo.unusedRangeCount;
9073  outInfo.unusedBytes += unusedRangeSize;
9074  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9075  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9076  }
9077 
9078  // 2. Process this allocation.
9079  // There is allocation with suballoc.offset, suballoc.size.
9080  outInfo.usedBytes += suballoc.size;
9081  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9082  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9083 
9084  // 3. Prepare for next iteration.
9085  lastOffset = suballoc.offset + suballoc.size;
9086  ++nextAlloc2ndIndex;
9087  }
9088  // We are at the end.
9089  else
9090  {
9091  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9092  if(lastOffset < freeSpace2ndTo1stEnd)
9093  {
9094  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9095  ++outInfo.unusedRangeCount;
9096  outInfo.unusedBytes += unusedRangeSize;
9097  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9098  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9099  }
9100 
9101  // End of loop.
9102  lastOffset = freeSpace2ndTo1stEnd;
9103  }
9104  }
9105  }
9106 
9107  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9108  const VkDeviceSize freeSpace1stTo2ndEnd =
9109  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9110  while(lastOffset < freeSpace1stTo2ndEnd)
9111  {
9112  // Find next non-null allocation or move nextAllocIndex to the end.
9113  while(nextAlloc1stIndex < suballoc1stCount &&
9114  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9115  {
9116  ++nextAlloc1stIndex;
9117  }
9118 
9119  // Found non-null allocation.
9120  if(nextAlloc1stIndex < suballoc1stCount)
9121  {
9122  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9123 
9124  // 1. Process free space before this allocation.
9125  if(lastOffset < suballoc.offset)
9126  {
9127  // There is free space from lastOffset to suballoc.offset.
9128  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9129  ++outInfo.unusedRangeCount;
9130  outInfo.unusedBytes += unusedRangeSize;
9131  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9132  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9133  }
9134 
9135  // 2. Process this allocation.
9136  // There is allocation with suballoc.offset, suballoc.size.
9137  outInfo.usedBytes += suballoc.size;
9138  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9139  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9140 
9141  // 3. Prepare for next iteration.
9142  lastOffset = suballoc.offset + suballoc.size;
9143  ++nextAlloc1stIndex;
9144  }
9145  // We are at the end.
9146  else
9147  {
9148  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9149  if(lastOffset < freeSpace1stTo2ndEnd)
9150  {
9151  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9152  ++outInfo.unusedRangeCount;
9153  outInfo.unusedBytes += unusedRangeSize;
9154  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9155  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9156  }
9157 
9158  // End of loop.
9159  lastOffset = freeSpace1stTo2ndEnd;
9160  }
9161  }
9162 
9163  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9164  {
9165  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9166  while(lastOffset < size)
9167  {
9168  // Find next non-null allocation or move nextAllocIndex to the end.
9169  while(nextAlloc2ndIndex != SIZE_MAX &&
9170  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9171  {
9172  --nextAlloc2ndIndex;
9173  }
9174 
9175  // Found non-null allocation.
9176  if(nextAlloc2ndIndex != SIZE_MAX)
9177  {
9178  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9179 
9180  // 1. Process free space before this allocation.
9181  if(lastOffset < suballoc.offset)
9182  {
9183  // There is free space from lastOffset to suballoc.offset.
9184  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9185  ++outInfo.unusedRangeCount;
9186  outInfo.unusedBytes += unusedRangeSize;
9187  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9188  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9189  }
9190 
9191  // 2. Process this allocation.
9192  // There is allocation with suballoc.offset, suballoc.size.
9193  outInfo.usedBytes += suballoc.size;
9194  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9195  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9196 
9197  // 3. Prepare for next iteration.
9198  lastOffset = suballoc.offset + suballoc.size;
9199  --nextAlloc2ndIndex;
9200  }
9201  // We are at the end.
9202  else
9203  {
9204  // There is free space from lastOffset to size.
9205  if(lastOffset < size)
9206  {
9207  const VkDeviceSize unusedRangeSize = size - lastOffset;
9208  ++outInfo.unusedRangeCount;
9209  outInfo.unusedBytes += unusedRangeSize;
9210  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9211  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9212  }
9213 
9214  // End of loop.
9215  lastOffset = size;
9216  }
9217  }
9218  }
9219 
9220  outInfo.unusedBytes = size - outInfo.usedBytes;
9221 }
9222 
9223 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9224 {
9225  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9226  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9227  const VkDeviceSize size = GetSize();
9228  const size_t suballoc1stCount = suballocations1st.size();
9229  const size_t suballoc2ndCount = suballocations2nd.size();
9230 
9231  inoutStats.size += size;
9232 
9233  VkDeviceSize lastOffset = 0;
9234 
9235  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9236  {
9237  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9238  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9239  while(lastOffset < freeSpace2ndTo1stEnd)
9240  {
9241  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9242  while(nextAlloc2ndIndex < suballoc2ndCount &&
9243  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9244  {
9245  ++nextAlloc2ndIndex;
9246  }
9247 
9248  // Found non-null allocation.
9249  if(nextAlloc2ndIndex < suballoc2ndCount)
9250  {
9251  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9252 
9253  // 1. Process free space before this allocation.
9254  if(lastOffset < suballoc.offset)
9255  {
9256  // There is free space from lastOffset to suballoc.offset.
9257  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9258  inoutStats.unusedSize += unusedRangeSize;
9259  ++inoutStats.unusedRangeCount;
9260  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9261  }
9262 
9263  // 2. Process this allocation.
9264  // There is allocation with suballoc.offset, suballoc.size.
9265  ++inoutStats.allocationCount;
9266 
9267  // 3. Prepare for next iteration.
9268  lastOffset = suballoc.offset + suballoc.size;
9269  ++nextAlloc2ndIndex;
9270  }
9271  // We are at the end.
9272  else
9273  {
9274  if(lastOffset < freeSpace2ndTo1stEnd)
9275  {
9276  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9277  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9278  inoutStats.unusedSize += unusedRangeSize;
9279  ++inoutStats.unusedRangeCount;
9280  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9281  }
9282 
9283  // End of loop.
9284  lastOffset = freeSpace2ndTo1stEnd;
9285  }
9286  }
9287  }
9288 
9289  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9290  const VkDeviceSize freeSpace1stTo2ndEnd =
9291  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9292  while(lastOffset < freeSpace1stTo2ndEnd)
9293  {
9294  // Find next non-null allocation or move nextAllocIndex to the end.
9295  while(nextAlloc1stIndex < suballoc1stCount &&
9296  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9297  {
9298  ++nextAlloc1stIndex;
9299  }
9300 
9301  // Found non-null allocation.
9302  if(nextAlloc1stIndex < suballoc1stCount)
9303  {
9304  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9305 
9306  // 1. Process free space before this allocation.
9307  if(lastOffset < suballoc.offset)
9308  {
9309  // There is free space from lastOffset to suballoc.offset.
9310  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9311  inoutStats.unusedSize += unusedRangeSize;
9312  ++inoutStats.unusedRangeCount;
9313  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9314  }
9315 
9316  // 2. Process this allocation.
9317  // There is allocation with suballoc.offset, suballoc.size.
9318  ++inoutStats.allocationCount;
9319 
9320  // 3. Prepare for next iteration.
9321  lastOffset = suballoc.offset + suballoc.size;
9322  ++nextAlloc1stIndex;
9323  }
9324  // We are at the end.
9325  else
9326  {
9327  if(lastOffset < freeSpace1stTo2ndEnd)
9328  {
9329  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9330  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9331  inoutStats.unusedSize += unusedRangeSize;
9332  ++inoutStats.unusedRangeCount;
9333  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9334  }
9335 
9336  // End of loop.
9337  lastOffset = freeSpace1stTo2ndEnd;
9338  }
9339  }
9340 
9341  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9342  {
9343  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9344  while(lastOffset < size)
9345  {
9346  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9347  while(nextAlloc2ndIndex != SIZE_MAX &&
9348  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9349  {
9350  --nextAlloc2ndIndex;
9351  }
9352 
9353  // Found non-null allocation.
9354  if(nextAlloc2ndIndex != SIZE_MAX)
9355  {
9356  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9357 
9358  // 1. Process free space before this allocation.
9359  if(lastOffset < suballoc.offset)
9360  {
9361  // There is free space from lastOffset to suballoc.offset.
9362  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9363  inoutStats.unusedSize += unusedRangeSize;
9364  ++inoutStats.unusedRangeCount;
9365  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9366  }
9367 
9368  // 2. Process this allocation.
9369  // There is allocation with suballoc.offset, suballoc.size.
9370  ++inoutStats.allocationCount;
9371 
9372  // 3. Prepare for next iteration.
9373  lastOffset = suballoc.offset + suballoc.size;
9374  --nextAlloc2ndIndex;
9375  }
9376  // We are at the end.
9377  else
9378  {
9379  if(lastOffset < size)
9380  {
9381  // There is free space from lastOffset to size.
9382  const VkDeviceSize unusedRangeSize = size - lastOffset;
9383  inoutStats.unusedSize += unusedRangeSize;
9384  ++inoutStats.unusedRangeCount;
9385  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9386  }
9387 
9388  // End of loop.
9389  lastOffset = size;
9390  }
9391  }
9392  }
9393 }
9394 
9395 #if VMA_STATS_STRING_ENABLED
9396 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9397 {
9398  const VkDeviceSize size = GetSize();
9399  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9400  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9401  const size_t suballoc1stCount = suballocations1st.size();
9402  const size_t suballoc2ndCount = suballocations2nd.size();
9403 
9404  // FIRST PASS
9405 
9406  size_t unusedRangeCount = 0;
9407  VkDeviceSize usedBytes = 0;
9408 
9409  VkDeviceSize lastOffset = 0;
9410 
9411  size_t alloc2ndCount = 0;
9412  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9413  {
9414  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9415  size_t nextAlloc2ndIndex = 0;
9416  while(lastOffset < freeSpace2ndTo1stEnd)
9417  {
9418  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9419  while(nextAlloc2ndIndex < suballoc2ndCount &&
9420  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9421  {
9422  ++nextAlloc2ndIndex;
9423  }
9424 
9425  // Found non-null allocation.
9426  if(nextAlloc2ndIndex < suballoc2ndCount)
9427  {
9428  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9429 
9430  // 1. Process free space before this allocation.
9431  if(lastOffset < suballoc.offset)
9432  {
9433  // There is free space from lastOffset to suballoc.offset.
9434  ++unusedRangeCount;
9435  }
9436 
9437  // 2. Process this allocation.
9438  // There is allocation with suballoc.offset, suballoc.size.
9439  ++alloc2ndCount;
9440  usedBytes += suballoc.size;
9441 
9442  // 3. Prepare for next iteration.
9443  lastOffset = suballoc.offset + suballoc.size;
9444  ++nextAlloc2ndIndex;
9445  }
9446  // We are at the end.
9447  else
9448  {
9449  if(lastOffset < freeSpace2ndTo1stEnd)
9450  {
9451  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9452  ++unusedRangeCount;
9453  }
9454 
9455  // End of loop.
9456  lastOffset = freeSpace2ndTo1stEnd;
9457  }
9458  }
9459  }
9460 
9461  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9462  size_t alloc1stCount = 0;
9463  const VkDeviceSize freeSpace1stTo2ndEnd =
9464  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9465  while(lastOffset < freeSpace1stTo2ndEnd)
9466  {
9467  // Find next non-null allocation or move nextAllocIndex to the end.
9468  while(nextAlloc1stIndex < suballoc1stCount &&
9469  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9470  {
9471  ++nextAlloc1stIndex;
9472  }
9473 
9474  // Found non-null allocation.
9475  if(nextAlloc1stIndex < suballoc1stCount)
9476  {
9477  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9478 
9479  // 1. Process free space before this allocation.
9480  if(lastOffset < suballoc.offset)
9481  {
9482  // There is free space from lastOffset to suballoc.offset.
9483  ++unusedRangeCount;
9484  }
9485 
9486  // 2. Process this allocation.
9487  // There is allocation with suballoc.offset, suballoc.size.
9488  ++alloc1stCount;
9489  usedBytes += suballoc.size;
9490 
9491  // 3. Prepare for next iteration.
9492  lastOffset = suballoc.offset + suballoc.size;
9493  ++nextAlloc1stIndex;
9494  }
9495  // We are at the end.
9496  else
9497  {
9498  if(lastOffset < size)
9499  {
9500  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9501  ++unusedRangeCount;
9502  }
9503 
9504  // End of loop.
9505  lastOffset = freeSpace1stTo2ndEnd;
9506  }
9507  }
9508 
9509  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9510  {
9511  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9512  while(lastOffset < size)
9513  {
9514  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9515  while(nextAlloc2ndIndex != SIZE_MAX &&
9516  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9517  {
9518  --nextAlloc2ndIndex;
9519  }
9520 
9521  // Found non-null allocation.
9522  if(nextAlloc2ndIndex != SIZE_MAX)
9523  {
9524  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9525 
9526  // 1. Process free space before this allocation.
9527  if(lastOffset < suballoc.offset)
9528  {
9529  // There is free space from lastOffset to suballoc.offset.
9530  ++unusedRangeCount;
9531  }
9532 
9533  // 2. Process this allocation.
9534  // There is allocation with suballoc.offset, suballoc.size.
9535  ++alloc2ndCount;
9536  usedBytes += suballoc.size;
9537 
9538  // 3. Prepare for next iteration.
9539  lastOffset = suballoc.offset + suballoc.size;
9540  --nextAlloc2ndIndex;
9541  }
9542  // We are at the end.
9543  else
9544  {
9545  if(lastOffset < size)
9546  {
9547  // There is free space from lastOffset to size.
9548  ++unusedRangeCount;
9549  }
9550 
9551  // End of loop.
9552  lastOffset = size;
9553  }
9554  }
9555  }
9556 
9557  const VkDeviceSize unusedBytes = size - usedBytes;
9558  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9559 
9560  // SECOND PASS
9561  lastOffset = 0;
9562 
9563  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9564  {
9565  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9566  size_t nextAlloc2ndIndex = 0;
9567  while(lastOffset < freeSpace2ndTo1stEnd)
9568  {
9569  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9570  while(nextAlloc2ndIndex < suballoc2ndCount &&
9571  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9572  {
9573  ++nextAlloc2ndIndex;
9574  }
9575 
9576  // Found non-null allocation.
9577  if(nextAlloc2ndIndex < suballoc2ndCount)
9578  {
9579  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9580 
9581  // 1. Process free space before this allocation.
9582  if(lastOffset < suballoc.offset)
9583  {
9584  // There is free space from lastOffset to suballoc.offset.
9585  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9586  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9587  }
9588 
9589  // 2. Process this allocation.
9590  // There is allocation with suballoc.offset, suballoc.size.
9591  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9592 
9593  // 3. Prepare for next iteration.
9594  lastOffset = suballoc.offset + suballoc.size;
9595  ++nextAlloc2ndIndex;
9596  }
9597  // We are at the end.
9598  else
9599  {
9600  if(lastOffset < freeSpace2ndTo1stEnd)
9601  {
9602  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9603  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9604  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9605  }
9606 
9607  // End of loop.
9608  lastOffset = freeSpace2ndTo1stEnd;
9609  }
9610  }
9611  }
9612 
9613  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9614  while(lastOffset < freeSpace1stTo2ndEnd)
9615  {
9616  // Find next non-null allocation or move nextAllocIndex to the end.
9617  while(nextAlloc1stIndex < suballoc1stCount &&
9618  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9619  {
9620  ++nextAlloc1stIndex;
9621  }
9622 
9623  // Found non-null allocation.
9624  if(nextAlloc1stIndex < suballoc1stCount)
9625  {
9626  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9627 
9628  // 1. Process free space before this allocation.
9629  if(lastOffset < suballoc.offset)
9630  {
9631  // There is free space from lastOffset to suballoc.offset.
9632  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9633  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9634  }
9635 
9636  // 2. Process this allocation.
9637  // There is allocation with suballoc.offset, suballoc.size.
9638  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9639 
9640  // 3. Prepare for next iteration.
9641  lastOffset = suballoc.offset + suballoc.size;
9642  ++nextAlloc1stIndex;
9643  }
9644  // We are at the end.
9645  else
9646  {
9647  if(lastOffset < freeSpace1stTo2ndEnd)
9648  {
9649  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9650  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9651  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9652  }
9653 
9654  // End of loop.
9655  lastOffset = freeSpace1stTo2ndEnd;
9656  }
9657  }
9658 
9659  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9660  {
9661  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9662  while(lastOffset < size)
9663  {
9664  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9665  while(nextAlloc2ndIndex != SIZE_MAX &&
9666  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9667  {
9668  --nextAlloc2ndIndex;
9669  }
9670 
9671  // Found non-null allocation.
9672  if(nextAlloc2ndIndex != SIZE_MAX)
9673  {
9674  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9675 
9676  // 1. Process free space before this allocation.
9677  if(lastOffset < suballoc.offset)
9678  {
9679  // There is free space from lastOffset to suballoc.offset.
9680  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9681  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9682  }
9683 
9684  // 2. Process this allocation.
9685  // There is allocation with suballoc.offset, suballoc.size.
9686  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9687 
9688  // 3. Prepare for next iteration.
9689  lastOffset = suballoc.offset + suballoc.size;
9690  --nextAlloc2ndIndex;
9691  }
9692  // We are at the end.
9693  else
9694  {
9695  if(lastOffset < size)
9696  {
9697  // There is free space from lastOffset to size.
9698  const VkDeviceSize unusedRangeSize = size - lastOffset;
9699  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9700  }
9701 
9702  // End of loop.
9703  lastOffset = size;
9704  }
9705  }
9706  }
9707 
9708  PrintDetailedMap_End(json);
9709 }
9710 #endif // #if VMA_STATS_STRING_ENABLED
9711 
9712 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9713  uint32_t currentFrameIndex,
9714  uint32_t frameInUseCount,
9715  VkDeviceSize bufferImageGranularity,
9716  VkDeviceSize allocSize,
9717  VkDeviceSize allocAlignment,
9718  bool upperAddress,
9719  VmaSuballocationType allocType,
9720  bool canMakeOtherLost,
9721  uint32_t strategy,
9722  VmaAllocationRequest* pAllocationRequest)
9723 {
9724  VMA_ASSERT(allocSize > 0);
9725  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9726  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9727  VMA_HEAVY_ASSERT(Validate());
9728  return upperAddress ?
9729  CreateAllocationRequest_UpperAddress(
9730  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9731  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9732  CreateAllocationRequest_LowerAddress(
9733  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9734  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9735 }
9736 
9737 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9738  uint32_t currentFrameIndex,
9739  uint32_t frameInUseCount,
9740  VkDeviceSize bufferImageGranularity,
9741  VkDeviceSize allocSize,
9742  VkDeviceSize allocAlignment,
9743  VmaSuballocationType allocType,
9744  bool canMakeOtherLost,
9745  uint32_t strategy,
9746  VmaAllocationRequest* pAllocationRequest)
9747 {
9748  const VkDeviceSize size = GetSize();
9749  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9750  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9751 
9752  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9753  {
9754  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9755  return false;
9756  }
9757 
9758  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9759  if(allocSize > size)
9760  {
9761  return false;
9762  }
9763  VkDeviceSize resultBaseOffset = size - allocSize;
9764  if(!suballocations2nd.empty())
9765  {
9766  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9767  resultBaseOffset = lastSuballoc.offset - allocSize;
9768  if(allocSize > lastSuballoc.offset)
9769  {
9770  return false;
9771  }
9772  }
9773 
9774  // Start from offset equal to end of free space.
9775  VkDeviceSize resultOffset = resultBaseOffset;
9776 
9777  // Apply VMA_DEBUG_MARGIN at the end.
9778  if(VMA_DEBUG_MARGIN > 0)
9779  {
9780  if(resultOffset < VMA_DEBUG_MARGIN)
9781  {
9782  return false;
9783  }
9784  resultOffset -= VMA_DEBUG_MARGIN;
9785  }
9786 
9787  // Apply alignment.
9788  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9789 
9790  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9791  // Make bigger alignment if necessary.
9792  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9793  {
9794  bool bufferImageGranularityConflict = false;
9795  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9796  {
9797  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9798  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9799  {
9800  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9801  {
9802  bufferImageGranularityConflict = true;
9803  break;
9804  }
9805  }
9806  else
9807  // Already on previous page.
9808  break;
9809  }
9810  if(bufferImageGranularityConflict)
9811  {
9812  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9813  }
9814  }
9815 
9816  // There is enough free space.
9817  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9818  suballocations1st.back().offset + suballocations1st.back().size :
9819  0;
9820  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9821  {
9822  // Check previous suballocations for BufferImageGranularity conflicts.
9823  // If conflict exists, allocation cannot be made here.
9824  if(bufferImageGranularity > 1)
9825  {
9826  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9827  {
9828  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9829  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9830  {
9831  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9832  {
9833  return false;
9834  }
9835  }
9836  else
9837  {
9838  // Already on next page.
9839  break;
9840  }
9841  }
9842  }
9843 
9844  // All tests passed: Success.
9845  pAllocationRequest->offset = resultOffset;
9846  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9847  pAllocationRequest->sumItemSize = 0;
9848  // pAllocationRequest->item unused.
9849  pAllocationRequest->itemsToMakeLostCount = 0;
9850  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9851  return true;
9852  }
9853 
9854  return false;
9855 }
9856 
9857 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9858  uint32_t currentFrameIndex,
9859  uint32_t frameInUseCount,
9860  VkDeviceSize bufferImageGranularity,
9861  VkDeviceSize allocSize,
9862  VkDeviceSize allocAlignment,
9863  VmaSuballocationType allocType,
9864  bool canMakeOtherLost,
9865  uint32_t strategy,
9866  VmaAllocationRequest* pAllocationRequest)
9867 {
9868  const VkDeviceSize size = GetSize();
9869  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9870  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9871 
9872  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9873  {
9874  // Try to allocate at the end of 1st vector.
9875 
9876  VkDeviceSize resultBaseOffset = 0;
9877  if(!suballocations1st.empty())
9878  {
9879  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9880  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9881  }
9882 
9883  // Start from offset equal to beginning of free space.
9884  VkDeviceSize resultOffset = resultBaseOffset;
9885 
9886  // Apply VMA_DEBUG_MARGIN at the beginning.
9887  if(VMA_DEBUG_MARGIN > 0)
9888  {
9889  resultOffset += VMA_DEBUG_MARGIN;
9890  }
9891 
9892  // Apply alignment.
9893  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9894 
9895  // Check previous suballocations for BufferImageGranularity conflicts.
9896  // Make bigger alignment if necessary.
9897  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9898  {
9899  bool bufferImageGranularityConflict = false;
9900  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9901  {
9902  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9903  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9904  {
9905  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9906  {
9907  bufferImageGranularityConflict = true;
9908  break;
9909  }
9910  }
9911  else
9912  // Already on previous page.
9913  break;
9914  }
9915  if(bufferImageGranularityConflict)
9916  {
9917  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9918  }
9919  }
9920 
9921  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9922  suballocations2nd.back().offset : size;
9923 
9924  // There is enough free space at the end after alignment.
9925  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9926  {
9927  // Check next suballocations for BufferImageGranularity conflicts.
9928  // If conflict exists, allocation cannot be made here.
9929  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9930  {
9931  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9932  {
9933  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9934  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9935  {
9936  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9937  {
9938  return false;
9939  }
9940  }
9941  else
9942  {
9943  // Already on previous page.
9944  break;
9945  }
9946  }
9947  }
9948 
9949  // All tests passed: Success.
9950  pAllocationRequest->offset = resultOffset;
9951  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9952  pAllocationRequest->sumItemSize = 0;
9953  // pAllocationRequest->item, customData unused.
9954  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9955  pAllocationRequest->itemsToMakeLostCount = 0;
9956  return true;
9957  }
9958  }
9959 
9960  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9961  // beginning of 1st vector as the end of free space.
9962  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9963  {
9964  VMA_ASSERT(!suballocations1st.empty());
9965 
9966  VkDeviceSize resultBaseOffset = 0;
9967  if(!suballocations2nd.empty())
9968  {
9969  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9970  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9971  }
9972 
9973  // Start from offset equal to beginning of free space.
9974  VkDeviceSize resultOffset = resultBaseOffset;
9975 
9976  // Apply VMA_DEBUG_MARGIN at the beginning.
9977  if(VMA_DEBUG_MARGIN > 0)
9978  {
9979  resultOffset += VMA_DEBUG_MARGIN;
9980  }
9981 
9982  // Apply alignment.
9983  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9984 
9985  // Check previous suballocations for BufferImageGranularity conflicts.
9986  // Make bigger alignment if necessary.
9987  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9988  {
9989  bool bufferImageGranularityConflict = false;
9990  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9991  {
9992  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9993  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9994  {
9995  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9996  {
9997  bufferImageGranularityConflict = true;
9998  break;
9999  }
10000  }
10001  else
10002  // Already on previous page.
10003  break;
10004  }
10005  if(bufferImageGranularityConflict)
10006  {
10007  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10008  }
10009  }
10010 
10011  pAllocationRequest->itemsToMakeLostCount = 0;
10012  pAllocationRequest->sumItemSize = 0;
10013  size_t index1st = m_1stNullItemsBeginCount;
10014 
10015  if(canMakeOtherLost)
10016  {
10017  while(index1st < suballocations1st.size() &&
10018  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10019  {
10020  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10021  const VmaSuballocation& suballoc = suballocations1st[index1st];
10022  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10023  {
10024  // No problem.
10025  }
10026  else
10027  {
10028  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10029  if(suballoc.hAllocation->CanBecomeLost() &&
10030  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10031  {
10032  ++pAllocationRequest->itemsToMakeLostCount;
10033  pAllocationRequest->sumItemSize += suballoc.size;
10034  }
10035  else
10036  {
10037  return false;
10038  }
10039  }
10040  ++index1st;
10041  }
10042 
10043  // Check next suballocations for BufferImageGranularity conflicts.
10044  // If conflict exists, we must mark more allocations lost or fail.
10045  if(bufferImageGranularity > 1)
10046  {
10047  while(index1st < suballocations1st.size())
10048  {
10049  const VmaSuballocation& suballoc = suballocations1st[index1st];
10050  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10051  {
10052  if(suballoc.hAllocation != VK_NULL_HANDLE)
10053  {
10054  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10055  if(suballoc.hAllocation->CanBecomeLost() &&
10056  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10057  {
10058  ++pAllocationRequest->itemsToMakeLostCount;
10059  pAllocationRequest->sumItemSize += suballoc.size;
10060  }
10061  else
10062  {
10063  return false;
10064  }
10065  }
10066  }
10067  else
10068  {
10069  // Already on next page.
10070  break;
10071  }
10072  ++index1st;
10073  }
10074  }
10075 
10076  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10077  if(index1st == suballocations1st.size() &&
10078  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10079  {
10080  // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
10081  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10082  }
10083  }
10084 
10085  // There is enough free space at the end after alignment.
10086  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10087  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10088  {
10089  // Check next suballocations for BufferImageGranularity conflicts.
10090  // If conflict exists, allocation cannot be made here.
10091  if(bufferImageGranularity > 1)
10092  {
10093  for(size_t nextSuballocIndex = index1st;
10094  nextSuballocIndex < suballocations1st.size();
10095  nextSuballocIndex++)
10096  {
10097  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10098  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10099  {
10100  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10101  {
10102  return false;
10103  }
10104  }
10105  else
10106  {
10107  // Already on next page.
10108  break;
10109  }
10110  }
10111  }
10112 
10113  // All tests passed: Success.
10114  pAllocationRequest->offset = resultOffset;
10115  pAllocationRequest->sumFreeSize =
10116  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10117  - resultBaseOffset
10118  - pAllocationRequest->sumItemSize;
10119  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10120  // pAllocationRequest->item, customData unused.
10121  return true;
10122  }
10123  }
10124 
10125  return false;
10126 }
10127 
10128 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10129  uint32_t currentFrameIndex,
10130  uint32_t frameInUseCount,
10131  VmaAllocationRequest* pAllocationRequest)
10132 {
10133  if(pAllocationRequest->itemsToMakeLostCount == 0)
10134  {
10135  return true;
10136  }
10137 
10138  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10139 
10140  // We always start from 1st.
10141  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10142  size_t index = m_1stNullItemsBeginCount;
10143  size_t madeLostCount = 0;
10144  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10145  {
10146  if(index == suballocations->size())
10147  {
10148  index = 0;
10149  // If we get to the end of 1st, we wrap around to beginning of 2nd of 1st.
10150  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10151  {
10152  suballocations = &AccessSuballocations2nd();
10153  }
10154  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10155  // suballocations continues pointing at AccessSuballocations1st().
10156  VMA_ASSERT(!suballocations->empty());
10157  }
10158  VmaSuballocation& suballoc = (*suballocations)[index];
10159  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10160  {
10161  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10162  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10163  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10164  {
10165  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10166  suballoc.hAllocation = VK_NULL_HANDLE;
10167  m_SumFreeSize += suballoc.size;
10168  if(suballocations == &AccessSuballocations1st())
10169  {
10170  ++m_1stNullItemsMiddleCount;
10171  }
10172  else
10173  {
10174  ++m_2ndNullItemsCount;
10175  }
10176  ++madeLostCount;
10177  }
10178  else
10179  {
10180  return false;
10181  }
10182  }
10183  ++index;
10184  }
10185 
10186  CleanupAfterFree();
10187  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
10188 
10189  return true;
10190 }
10191 
10192 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10193 {
10194  uint32_t lostAllocationCount = 0;
10195 
10196  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10197  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10198  {
10199  VmaSuballocation& suballoc = suballocations1st[i];
10200  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10201  suballoc.hAllocation->CanBecomeLost() &&
10202  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10203  {
10204  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10205  suballoc.hAllocation = VK_NULL_HANDLE;
10206  ++m_1stNullItemsMiddleCount;
10207  m_SumFreeSize += suballoc.size;
10208  ++lostAllocationCount;
10209  }
10210  }
10211 
10212  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10213  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10214  {
10215  VmaSuballocation& suballoc = suballocations2nd[i];
10216  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10217  suballoc.hAllocation->CanBecomeLost() &&
10218  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10219  {
10220  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10221  suballoc.hAllocation = VK_NULL_HANDLE;
10222  ++m_2ndNullItemsCount;
10223  m_SumFreeSize += suballoc.size;
10224  ++lostAllocationCount;
10225  }
10226  }
10227 
10228  if(lostAllocationCount)
10229  {
10230  CleanupAfterFree();
10231  }
10232 
10233  return lostAllocationCount;
10234 }
10235 
10236 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10237 {
10238  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10239  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10240  {
10241  const VmaSuballocation& suballoc = suballocations1st[i];
10242  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10243  {
10244  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10245  {
10246  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10247  return VK_ERROR_VALIDATION_FAILED_EXT;
10248  }
10249  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10250  {
10251  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10252  return VK_ERROR_VALIDATION_FAILED_EXT;
10253  }
10254  }
10255  }
10256 
10257  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10258  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10259  {
10260  const VmaSuballocation& suballoc = suballocations2nd[i];
10261  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10262  {
10263  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10264  {
10265  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10266  return VK_ERROR_VALIDATION_FAILED_EXT;
10267  }
10268  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10269  {
10270  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10271  return VK_ERROR_VALIDATION_FAILED_EXT;
10272  }
10273  }
10274  }
10275 
10276  return VK_SUCCESS;
10277 }
10278 
10279 void VmaBlockMetadata_Linear::Alloc(
10280  const VmaAllocationRequest& request,
10281  VmaSuballocationType type,
10282  VkDeviceSize allocSize,
10283  VmaAllocation hAllocation)
10284 {
10285  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10286 
10287  switch(request.type)
10288  {
10289  case VmaAllocationRequestType::UpperAddress:
10290  {
10291  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10292  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10293  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10294  suballocations2nd.push_back(newSuballoc);
10295  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10296  }
10297  break;
10298  case VmaAllocationRequestType::EndOf1st:
10299  {
10300  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10301 
10302  VMA_ASSERT(suballocations1st.empty() ||
10303  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10304  // Check if it fits before the end of the block.
10305  VMA_ASSERT(request.offset + allocSize <= GetSize());
10306 
10307  suballocations1st.push_back(newSuballoc);
10308  }
10309  break;
10310  case VmaAllocationRequestType::EndOf2nd:
10311  {
10312  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10313  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10314  VMA_ASSERT(!suballocations1st.empty() &&
10315  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10316  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10317 
10318  switch(m_2ndVectorMode)
10319  {
10320  case SECOND_VECTOR_EMPTY:
10321  // First allocation from second part ring buffer.
10322  VMA_ASSERT(suballocations2nd.empty());
10323  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10324  break;
10325  case SECOND_VECTOR_RING_BUFFER:
10326  // 2-part ring buffer is already started.
10327  VMA_ASSERT(!suballocations2nd.empty());
10328  break;
10329  case SECOND_VECTOR_DOUBLE_STACK:
10330  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10331  break;
10332  default:
10333  VMA_ASSERT(0);
10334  }
10335 
10336  suballocations2nd.push_back(newSuballoc);
10337  }
10338  break;
10339  default:
10340  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10341  }
10342 
10343  m_SumFreeSize -= newSuballoc.size;
10344 }
10345 
10346 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10347 {
10348  FreeAtOffset(allocation->GetOffset());
10349 }
10350 
10351 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10352 {
10353  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10354  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10355 
10356  if(!suballocations1st.empty())
10357  {
10358  // First allocation: Mark it as next empty at the beginning.
10359  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10360  if(firstSuballoc.offset == offset)
10361  {
10362  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10363  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10364  m_SumFreeSize += firstSuballoc.size;
10365  ++m_1stNullItemsBeginCount;
10366  CleanupAfterFree();
10367  return;
10368  }
10369  }
10370 
10371  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10372  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10373  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10374  {
10375  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10376  if(lastSuballoc.offset == offset)
10377  {
10378  m_SumFreeSize += lastSuballoc.size;
10379  suballocations2nd.pop_back();
10380  CleanupAfterFree();
10381  return;
10382  }
10383  }
10384  // Last allocation in 1st vector.
10385  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10386  {
10387  VmaSuballocation& lastSuballoc = suballocations1st.back();
10388  if(lastSuballoc.offset == offset)
10389  {
10390  m_SumFreeSize += lastSuballoc.size;
10391  suballocations1st.pop_back();
10392  CleanupAfterFree();
10393  return;
10394  }
10395  }
10396 
10397  // Item from the middle of 1st vector.
10398  {
10399  VmaSuballocation refSuballoc;
10400  refSuballoc.offset = offset;
10401  // Rest of members stays uninitialized intentionally for better performance.
10402  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10403  suballocations1st.begin() + m_1stNullItemsBeginCount,
10404  suballocations1st.end(),
10405  refSuballoc);
10406  if(it != suballocations1st.end())
10407  {
10408  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10409  it->hAllocation = VK_NULL_HANDLE;
10410  ++m_1stNullItemsMiddleCount;
10411  m_SumFreeSize += it->size;
10412  CleanupAfterFree();
10413  return;
10414  }
10415  }
10416 
10417  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10418  {
10419  // Item from the middle of 2nd vector.
10420  VmaSuballocation refSuballoc;
10421  refSuballoc.offset = offset;
10422  // Rest of members stays uninitialized intentionally for better performance.
10423  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10424  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10425  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10426  if(it != suballocations2nd.end())
10427  {
10428  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10429  it->hAllocation = VK_NULL_HANDLE;
10430  ++m_2ndNullItemsCount;
10431  m_SumFreeSize += it->size;
10432  CleanupAfterFree();
10433  return;
10434  }
10435  }
10436 
10437  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10438 }
10439 
10440 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10441 {
10442  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10443  const size_t suballocCount = AccessSuballocations1st().size();
10444  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10445 }
10446 
10447 void VmaBlockMetadata_Linear::CleanupAfterFree()
10448 {
10449  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10450  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10451 
10452  if(IsEmpty())
10453  {
10454  suballocations1st.clear();
10455  suballocations2nd.clear();
10456  m_1stNullItemsBeginCount = 0;
10457  m_1stNullItemsMiddleCount = 0;
10458  m_2ndNullItemsCount = 0;
10459  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10460  }
10461  else
10462  {
10463  const size_t suballoc1stCount = suballocations1st.size();
10464  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10465  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10466 
10467  // Find more null items at the beginning of 1st vector.
10468  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10469  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10470  {
10471  ++m_1stNullItemsBeginCount;
10472  --m_1stNullItemsMiddleCount;
10473  }
10474 
10475  // Find more null items at the end of 1st vector.
10476  while(m_1stNullItemsMiddleCount > 0 &&
10477  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10478  {
10479  --m_1stNullItemsMiddleCount;
10480  suballocations1st.pop_back();
10481  }
10482 
10483  // Find more null items at the end of 2nd vector.
10484  while(m_2ndNullItemsCount > 0 &&
10485  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10486  {
10487  --m_2ndNullItemsCount;
10488  suballocations2nd.pop_back();
10489  }
10490 
10491  // Find more null items at the beginning of 2nd vector.
10492  while(m_2ndNullItemsCount > 0 &&
10493  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10494  {
10495  --m_2ndNullItemsCount;
10496  suballocations2nd.remove(0);
10497  }
10498 
10499  if(ShouldCompact1st())
10500  {
10501  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10502  size_t srcIndex = m_1stNullItemsBeginCount;
10503  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10504  {
10505  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10506  {
10507  ++srcIndex;
10508  }
10509  if(dstIndex != srcIndex)
10510  {
10511  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10512  }
10513  ++srcIndex;
10514  }
10515  suballocations1st.resize(nonNullItemCount);
10516  m_1stNullItemsBeginCount = 0;
10517  m_1stNullItemsMiddleCount = 0;
10518  }
10519 
10520  // 2nd vector became empty.
10521  if(suballocations2nd.empty())
10522  {
10523  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10524  }
10525 
10526  // 1st vector became empty.
10527  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10528  {
10529  suballocations1st.clear();
10530  m_1stNullItemsBeginCount = 0;
10531 
10532  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10533  {
10534  // Swap 1st with 2nd. Now 2nd is empty.
10535  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10536  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10537  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10538  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10539  {
10540  ++m_1stNullItemsBeginCount;
10541  --m_1stNullItemsMiddleCount;
10542  }
10543  m_2ndNullItemsCount = 0;
10544  m_1stVectorIndex ^= 1;
10545  }
10546  }
10547  }
10548 
10549  VMA_HEAVY_ASSERT(Validate());
10550 }
10551 
10552 
10554 // class VmaBlockMetadata_Buddy
10555 
10556 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10557  VmaBlockMetadata(hAllocator),
10558  m_Root(VMA_NULL),
10559  m_AllocationCount(0),
10560  m_FreeCount(1),
10561  m_SumFreeSize(0)
10562 {
10563  memset(m_FreeList, 0, sizeof(m_FreeList));
10564 }
10565 
10566 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10567 {
10568  DeleteNode(m_Root);
10569 }
10570 
10571 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10572 {
10573  VmaBlockMetadata::Init(size);
10574 
10575  m_UsableSize = VmaPrevPow2(size);
10576  m_SumFreeSize = m_UsableSize;
10577 
10578  // Calculate m_LevelCount.
10579  m_LevelCount = 1;
10580  while(m_LevelCount < MAX_LEVELS &&
10581  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10582  {
10583  ++m_LevelCount;
10584  }
10585 
10586  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10587  rootNode->offset = 0;
10588  rootNode->type = Node::TYPE_FREE;
10589  rootNode->parent = VMA_NULL;
10590  rootNode->buddy = VMA_NULL;
10591 
10592  m_Root = rootNode;
10593  AddToFreeListFront(0, rootNode);
10594 }
10595 
10596 bool VmaBlockMetadata_Buddy::Validate() const
10597 {
10598  // Validate tree.
10599  ValidationContext ctx;
10600  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10601  {
10602  VMA_VALIDATE(false && "ValidateNode failed.");
10603  }
10604  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10605  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10606 
10607  // Validate free node lists.
10608  for(uint32_t level = 0; level < m_LevelCount; ++level)
10609  {
10610  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10611  m_FreeList[level].front->free.prev == VMA_NULL);
10612 
10613  for(Node* node = m_FreeList[level].front;
10614  node != VMA_NULL;
10615  node = node->free.next)
10616  {
10617  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10618 
10619  if(node->free.next == VMA_NULL)
10620  {
10621  VMA_VALIDATE(m_FreeList[level].back == node);
10622  }
10623  else
10624  {
10625  VMA_VALIDATE(node->free.next->free.prev == node);
10626  }
10627  }
10628  }
10629 
10630  // Validate that free lists ar higher levels are empty.
10631  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10632  {
10633  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10634  }
10635 
10636  return true;
10637 }
10638 
10639 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10640 {
10641  for(uint32_t level = 0; level < m_LevelCount; ++level)
10642  {
10643  if(m_FreeList[level].front != VMA_NULL)
10644  {
10645  return LevelToNodeSize(level);
10646  }
10647  }
10648  return 0;
10649 }
10650 
10651 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10652 {
10653  const VkDeviceSize unusableSize = GetUnusableSize();
10654 
10655  outInfo.blockCount = 1;
10656 
10657  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10658  outInfo.usedBytes = outInfo.unusedBytes = 0;
10659 
10660  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10661  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10662  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10663 
10664  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10665 
10666  if(unusableSize > 0)
10667  {
10668  ++outInfo.unusedRangeCount;
10669  outInfo.unusedBytes += unusableSize;
10670  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10671  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10672  }
10673 }
10674 
10675 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10676 {
10677  const VkDeviceSize unusableSize = GetUnusableSize();
10678 
10679  inoutStats.size += GetSize();
10680  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10681  inoutStats.allocationCount += m_AllocationCount;
10682  inoutStats.unusedRangeCount += m_FreeCount;
10683  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10684 
10685  if(unusableSize > 0)
10686  {
10687  ++inoutStats.unusedRangeCount;
10688  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10689  }
10690 }
10691 
10692 #if VMA_STATS_STRING_ENABLED
10693 
10694 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10695 {
10696  // TODO optimize
10697  VmaStatInfo stat;
10698  CalcAllocationStatInfo(stat);
10699 
10700  PrintDetailedMap_Begin(
10701  json,
10702  stat.unusedBytes,
10703  stat.allocationCount,
10704  stat.unusedRangeCount);
10705 
10706  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10707 
10708  const VkDeviceSize unusableSize = GetUnusableSize();
10709  if(unusableSize > 0)
10710  {
10711  PrintDetailedMap_UnusedRange(json,
10712  m_UsableSize, // offset
10713  unusableSize); // size
10714  }
10715 
10716  PrintDetailedMap_End(json);
10717 }
10718 
10719 #endif // #if VMA_STATS_STRING_ENABLED
10720 
10721 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10722  uint32_t currentFrameIndex,
10723  uint32_t frameInUseCount,
10724  VkDeviceSize bufferImageGranularity,
10725  VkDeviceSize allocSize,
10726  VkDeviceSize allocAlignment,
10727  bool upperAddress,
10728  VmaSuballocationType allocType,
10729  bool canMakeOtherLost,
10730  uint32_t strategy,
10731  VmaAllocationRequest* pAllocationRequest)
10732 {
10733  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10734 
10735  // Simple way to respect bufferImageGranularity. May be optimized some day.
10736  // Whenever it might be an OPTIMAL image...
10737  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10738  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10739  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10740  {
10741  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10742  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10743  }
10744 
10745  if(allocSize > m_UsableSize)
10746  {
10747  return false;
10748  }
10749 
10750  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10751  for(uint32_t level = targetLevel + 1; level--; )
10752  {
10753  for(Node* freeNode = m_FreeList[level].front;
10754  freeNode != VMA_NULL;
10755  freeNode = freeNode->free.next)
10756  {
10757  if(freeNode->offset % allocAlignment == 0)
10758  {
10759  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10760  pAllocationRequest->offset = freeNode->offset;
10761  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10762  pAllocationRequest->sumItemSize = 0;
10763  pAllocationRequest->itemsToMakeLostCount = 0;
10764  pAllocationRequest->customData = (void*)(uintptr_t)level;
10765  return true;
10766  }
10767  }
10768  }
10769 
10770  return false;
10771 }
10772 
10773 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10774  uint32_t currentFrameIndex,
10775  uint32_t frameInUseCount,
10776  VmaAllocationRequest* pAllocationRequest)
10777 {
10778  /*
10779  Lost allocations are not supported in buddy allocator at the moment.
10780  Support might be added in the future.
10781  */
10782  return pAllocationRequest->itemsToMakeLostCount == 0;
10783 }
10784 
10785 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10786 {
10787  /*
10788  Lost allocations are not supported in buddy allocator at the moment.
10789  Support might be added in the future.
10790  */
10791  return 0;
10792 }
10793 
10794 void VmaBlockMetadata_Buddy::Alloc(
10795  const VmaAllocationRequest& request,
10796  VmaSuballocationType type,
10797  VkDeviceSize allocSize,
10798  VmaAllocation hAllocation)
10799 {
10800  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10801 
10802  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10803  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10804 
10805  Node* currNode = m_FreeList[currLevel].front;
10806  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10807  while(currNode->offset != request.offset)
10808  {
10809  currNode = currNode->free.next;
10810  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10811  }
10812 
10813  // Go down, splitting free nodes.
10814  while(currLevel < targetLevel)
10815  {
10816  // currNode is already first free node at currLevel.
10817  // Remove it from list of free nodes at this currLevel.
10818  RemoveFromFreeList(currLevel, currNode);
10819 
10820  const uint32_t childrenLevel = currLevel + 1;
10821 
10822  // Create two free sub-nodes.
10823  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10824  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10825 
10826  leftChild->offset = currNode->offset;
10827  leftChild->type = Node::TYPE_FREE;
10828  leftChild->parent = currNode;
10829  leftChild->buddy = rightChild;
10830 
10831  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10832  rightChild->type = Node::TYPE_FREE;
10833  rightChild->parent = currNode;
10834  rightChild->buddy = leftChild;
10835 
10836  // Convert current currNode to split type.
10837  currNode->type = Node::TYPE_SPLIT;
10838  currNode->split.leftChild = leftChild;
10839 
10840  // Add child nodes to free list. Order is important!
10841  AddToFreeListFront(childrenLevel, rightChild);
10842  AddToFreeListFront(childrenLevel, leftChild);
10843 
10844  ++m_FreeCount;
10845  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10846  ++currLevel;
10847  currNode = m_FreeList[currLevel].front;
10848 
10849  /*
10850  We can be sure that currNode, as left child of node previously split,
10851  also fullfills the alignment requirement.
10852  */
10853  }
10854 
10855  // Remove from free list.
10856  VMA_ASSERT(currLevel == targetLevel &&
10857  currNode != VMA_NULL &&
10858  currNode->type == Node::TYPE_FREE);
10859  RemoveFromFreeList(currLevel, currNode);
10860 
10861  // Convert to allocation node.
10862  currNode->type = Node::TYPE_ALLOCATION;
10863  currNode->allocation.alloc = hAllocation;
10864 
10865  ++m_AllocationCount;
10866  --m_FreeCount;
10867  m_SumFreeSize -= allocSize;
10868 }
10869 
10870 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10871 {
10872  if(node->type == Node::TYPE_SPLIT)
10873  {
10874  DeleteNode(node->split.leftChild->buddy);
10875  DeleteNode(node->split.leftChild);
10876  }
10877 
10878  vma_delete(GetAllocationCallbacks(), node);
10879 }
10880 
10881 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10882 {
10883  VMA_VALIDATE(level < m_LevelCount);
10884  VMA_VALIDATE(curr->parent == parent);
10885  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10886  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10887  switch(curr->type)
10888  {
10889  case Node::TYPE_FREE:
10890  // curr->free.prev, next are validated separately.
10891  ctx.calculatedSumFreeSize += levelNodeSize;
10892  ++ctx.calculatedFreeCount;
10893  break;
10894  case Node::TYPE_ALLOCATION:
10895  ++ctx.calculatedAllocationCount;
10896  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10897  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10898  break;
10899  case Node::TYPE_SPLIT:
10900  {
10901  const uint32_t childrenLevel = level + 1;
10902  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10903  const Node* const leftChild = curr->split.leftChild;
10904  VMA_VALIDATE(leftChild != VMA_NULL);
10905  VMA_VALIDATE(leftChild->offset == curr->offset);
10906  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10907  {
10908  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10909  }
10910  const Node* const rightChild = leftChild->buddy;
10911  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10912  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10913  {
10914  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10915  }
10916  }
10917  break;
10918  default:
10919  return false;
10920  }
10921 
10922  return true;
10923 }
10924 
10925 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10926 {
10927  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10928  uint32_t level = 0;
10929  VkDeviceSize currLevelNodeSize = m_UsableSize;
10930  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10931  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10932  {
10933  ++level;
10934  currLevelNodeSize = nextLevelNodeSize;
10935  nextLevelNodeSize = currLevelNodeSize >> 1;
10936  }
10937  return level;
10938 }
10939 
10940 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10941 {
10942  // Find node and level.
10943  Node* node = m_Root;
10944  VkDeviceSize nodeOffset = 0;
10945  uint32_t level = 0;
10946  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10947  while(node->type == Node::TYPE_SPLIT)
10948  {
10949  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10950  if(offset < nodeOffset + nextLevelSize)
10951  {
10952  node = node->split.leftChild;
10953  }
10954  else
10955  {
10956  node = node->split.leftChild->buddy;
10957  nodeOffset += nextLevelSize;
10958  }
10959  ++level;
10960  levelNodeSize = nextLevelSize;
10961  }
10962 
10963  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10964  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10965 
10966  ++m_FreeCount;
10967  --m_AllocationCount;
10968  m_SumFreeSize += alloc->GetSize();
10969 
10970  node->type = Node::TYPE_FREE;
10971 
10972  // Join free nodes if possible.
10973  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10974  {
10975  RemoveFromFreeList(level, node->buddy);
10976  Node* const parent = node->parent;
10977 
10978  vma_delete(GetAllocationCallbacks(), node->buddy);
10979  vma_delete(GetAllocationCallbacks(), node);
10980  parent->type = Node::TYPE_FREE;
10981 
10982  node = parent;
10983  --level;
10984  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10985  --m_FreeCount;
10986  }
10987 
10988  AddToFreeListFront(level, node);
10989 }
10990 
10991 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10992 {
10993  switch(node->type)
10994  {
10995  case Node::TYPE_FREE:
10996  ++outInfo.unusedRangeCount;
10997  outInfo.unusedBytes += levelNodeSize;
10998  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10999  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
11000  break;
11001  case Node::TYPE_ALLOCATION:
11002  {
11003  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11004  ++outInfo.allocationCount;
11005  outInfo.usedBytes += allocSize;
11006  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11007  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
11008 
11009  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11010  if(unusedRangeSize > 0)
11011  {
11012  ++outInfo.unusedRangeCount;
11013  outInfo.unusedBytes += unusedRangeSize;
11014  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11015  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11016  }
11017  }
11018  break;
11019  case Node::TYPE_SPLIT:
11020  {
11021  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11022  const Node* const leftChild = node->split.leftChild;
11023  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11024  const Node* const rightChild = leftChild->buddy;
11025  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11026  }
11027  break;
11028  default:
11029  VMA_ASSERT(0);
11030  }
11031 }
11032 
11033 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11034 {
11035  VMA_ASSERT(node->type == Node::TYPE_FREE);
11036 
11037  // List is empty.
11038  Node* const frontNode = m_FreeList[level].front;
11039  if(frontNode == VMA_NULL)
11040  {
11041  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11042  node->free.prev = node->free.next = VMA_NULL;
11043  m_FreeList[level].front = m_FreeList[level].back = node;
11044  }
11045  else
11046  {
11047  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11048  node->free.prev = VMA_NULL;
11049  node->free.next = frontNode;
11050  frontNode->free.prev = node;
11051  m_FreeList[level].front = node;
11052  }
11053 }
11054 
11055 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11056 {
11057  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11058 
11059  // It is at the front.
11060  if(node->free.prev == VMA_NULL)
11061  {
11062  VMA_ASSERT(m_FreeList[level].front == node);
11063  m_FreeList[level].front = node->free.next;
11064  }
11065  else
11066  {
11067  Node* const prevFreeNode = node->free.prev;
11068  VMA_ASSERT(prevFreeNode->free.next == node);
11069  prevFreeNode->free.next = node->free.next;
11070  }
11071 
11072  // It is at the back.
11073  if(node->free.next == VMA_NULL)
11074  {
11075  VMA_ASSERT(m_FreeList[level].back == node);
11076  m_FreeList[level].back = node->free.prev;
11077  }
11078  else
11079  {
11080  Node* const nextFreeNode = node->free.next;
11081  VMA_ASSERT(nextFreeNode->free.prev == node);
11082  nextFreeNode->free.prev = node->free.prev;
11083  }
11084 }
11085 
11086 #if VMA_STATS_STRING_ENABLED
11087 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11088 {
11089  switch(node->type)
11090  {
11091  case Node::TYPE_FREE:
11092  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11093  break;
11094  case Node::TYPE_ALLOCATION:
11095  {
11096  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11097  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11098  if(allocSize < levelNodeSize)
11099  {
11100  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11101  }
11102  }
11103  break;
11104  case Node::TYPE_SPLIT:
11105  {
11106  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11107  const Node* const leftChild = node->split.leftChild;
11108  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11109  const Node* const rightChild = leftChild->buddy;
11110  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11111  }
11112  break;
11113  default:
11114  VMA_ASSERT(0);
11115  }
11116 }
11117 #endif // #if VMA_STATS_STRING_ENABLED
11118 
11119 
11121 // class VmaDeviceMemoryBlock
11122 
11123 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11124  m_pMetadata(VMA_NULL),
11125  m_MemoryTypeIndex(UINT32_MAX),
11126  m_Id(0),
11127  m_hMemory(VK_NULL_HANDLE),
11128  m_MapCount(0),
11129  m_pMappedData(VMA_NULL)
11130 {
11131 }
11132 
11133 void VmaDeviceMemoryBlock::Init(
11134  VmaAllocator hAllocator,
11135  VmaPool hParentPool,
11136  uint32_t newMemoryTypeIndex,
11137  VkDeviceMemory newMemory,
11138  VkDeviceSize newSize,
11139  uint32_t id,
11140  uint32_t algorithm)
11141 {
11142  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11143 
11144  m_hParentPool = hParentPool;
11145  m_MemoryTypeIndex = newMemoryTypeIndex;
11146  m_Id = id;
11147  m_hMemory = newMemory;
11148 
11149  switch(algorithm)
11150  {
11152  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11153  break;
11155  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11156  break;
11157  default:
11158  VMA_ASSERT(0);
11159  // Fall-through.
11160  case 0:
11161  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11162  }
11163  m_pMetadata->Init(newSize);
11164 }
11165 
11166 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11167 {
11168  // This is the most important assert in the entire library.
11169  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11170  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11171 
11172  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11173  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11174  m_hMemory = VK_NULL_HANDLE;
11175 
11176  vma_delete(allocator, m_pMetadata);
11177  m_pMetadata = VMA_NULL;
11178 }
11179 
11180 bool VmaDeviceMemoryBlock::Validate() const
11181 {
11182  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11183  (m_pMetadata->GetSize() != 0));
11184 
11185  return m_pMetadata->Validate();
11186 }
11187 
11188 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11189 {
11190  void* pData = nullptr;
11191  VkResult res = Map(hAllocator, 1, &pData);
11192  if(res != VK_SUCCESS)
11193  {
11194  return res;
11195  }
11196 
11197  res = m_pMetadata->CheckCorruption(pData);
11198 
11199  Unmap(hAllocator, 1);
11200 
11201  return res;
11202 }
11203 
11204 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11205 {
11206  if(count == 0)
11207  {
11208  return VK_SUCCESS;
11209  }
11210 
11211  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11212  if(m_MapCount != 0)
11213  {
11214  m_MapCount += count;
11215  VMA_ASSERT(m_pMappedData != VMA_NULL);
11216  if(ppData != VMA_NULL)
11217  {
11218  *ppData = m_pMappedData;
11219  }
11220  return VK_SUCCESS;
11221  }
11222  else
11223  {
11224  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11225  hAllocator->m_hDevice,
11226  m_hMemory,
11227  0, // offset
11228  VK_WHOLE_SIZE,
11229  0, // flags
11230  &m_pMappedData);
11231  if(result == VK_SUCCESS)
11232  {
11233  if(ppData != VMA_NULL)
11234  {
11235  *ppData = m_pMappedData;
11236  }
11237  m_MapCount = count;
11238  }
11239  return result;
11240  }
11241 }
11242 
11243 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11244 {
11245  if(count == 0)
11246  {
11247  return;
11248  }
11249 
11250  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11251  if(m_MapCount >= count)
11252  {
11253  m_MapCount -= count;
11254  if(m_MapCount == 0)
11255  {
11256  m_pMappedData = VMA_NULL;
11257  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11258  }
11259  }
11260  else
11261  {
11262  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11263  }
11264 }
11265 
11266 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11267 {
11268  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11269  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11270 
11271  void* pData;
11272  VkResult res = Map(hAllocator, 1, &pData);
11273  if(res != VK_SUCCESS)
11274  {
11275  return res;
11276  }
11277 
11278  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11279  VmaWriteMagicValue(pData, allocOffset + allocSize);
11280 
11281  Unmap(hAllocator, 1);
11282 
11283  return VK_SUCCESS;
11284 }
11285 
11286 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11287 {
11288  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11289  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11290 
11291  void* pData;
11292  VkResult res = Map(hAllocator, 1, &pData);
11293  if(res != VK_SUCCESS)
11294  {
11295  return res;
11296  }
11297 
11298  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11299  {
11300  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11301  }
11302  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11303  {
11304  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11305  }
11306 
11307  Unmap(hAllocator, 1);
11308 
11309  return VK_SUCCESS;
11310 }
11311 
11312 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11313  const VmaAllocator hAllocator,
11314  const VmaAllocation hAllocation,
11315  VkBuffer hBuffer)
11316 {
11317  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11318  hAllocation->GetBlock() == this);
11319  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11320  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11321  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11322  hAllocator->m_hDevice,
11323  hBuffer,
11324  m_hMemory,
11325  hAllocation->GetOffset());
11326 }
11327 
11328 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11329  const VmaAllocator hAllocator,
11330  const VmaAllocation hAllocation,
11331  VkImage hImage)
11332 {
11333  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11334  hAllocation->GetBlock() == this);
11335  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11336  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11337  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11338  hAllocator->m_hDevice,
11339  hImage,
11340  m_hMemory,
11341  hAllocation->GetOffset());
11342 }
11343 
11344 static void InitStatInfo(VmaStatInfo& outInfo)
11345 {
11346  memset(&outInfo, 0, sizeof(outInfo));
11347  outInfo.allocationSizeMin = UINT64_MAX;
11348  outInfo.unusedRangeSizeMin = UINT64_MAX;
11349 }
11350 
11351 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11352 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11353 {
11354  inoutInfo.blockCount += srcInfo.blockCount;
11355  inoutInfo.allocationCount += srcInfo.allocationCount;
11356  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11357  inoutInfo.usedBytes += srcInfo.usedBytes;
11358  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11359  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11360  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11361  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11362  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11363 }
11364 
11365 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11366 {
11367  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11368  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11369  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11370  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11371 }
11372 
11373 VmaPool_T::VmaPool_T(
11374  VmaAllocator hAllocator,
11375  const VmaPoolCreateInfo& createInfo,
11376  VkDeviceSize preferredBlockSize) :
11377  m_BlockVector(
11378  hAllocator,
11379  this, // hParentPool
11380  createInfo.memoryTypeIndex,
11381  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11382  createInfo.minBlockCount,
11383  createInfo.maxBlockCount,
11384  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11385  createInfo.frameInUseCount,
11386  true, // isCustomPool
11387  createInfo.blockSize != 0, // explicitBlockSize
11388  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11389  m_Id(0)
11390 {
11391 }
11392 
11393 VmaPool_T::~VmaPool_T()
11394 {
11395 }
11396 
11397 #if VMA_STATS_STRING_ENABLED
11398 
11399 #endif // #if VMA_STATS_STRING_ENABLED
11400 
11401 VmaBlockVector::VmaBlockVector(
11402  VmaAllocator hAllocator,
11403  VmaPool hParentPool,
11404  uint32_t memoryTypeIndex,
11405  VkDeviceSize preferredBlockSize,
11406  size_t minBlockCount,
11407  size_t maxBlockCount,
11408  VkDeviceSize bufferImageGranularity,
11409  uint32_t frameInUseCount,
11410  bool isCustomPool,
11411  bool explicitBlockSize,
11412  uint32_t algorithm) :
11413  m_hAllocator(hAllocator),
11414  m_hParentPool(hParentPool),
11415  m_MemoryTypeIndex(memoryTypeIndex),
11416  m_PreferredBlockSize(preferredBlockSize),
11417  m_MinBlockCount(minBlockCount),
11418  m_MaxBlockCount(maxBlockCount),
11419  m_BufferImageGranularity(bufferImageGranularity),
11420  m_FrameInUseCount(frameInUseCount),
11421  m_IsCustomPool(isCustomPool),
11422  m_ExplicitBlockSize(explicitBlockSize),
11423  m_Algorithm(algorithm),
11424  m_HasEmptyBlock(false),
11425  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11426  m_NextBlockId(0)
11427 {
11428 }
11429 
11430 VmaBlockVector::~VmaBlockVector()
11431 {
11432  for(size_t i = m_Blocks.size(); i--; )
11433  {
11434  m_Blocks[i]->Destroy(m_hAllocator);
11435  vma_delete(m_hAllocator, m_Blocks[i]);
11436  }
11437 }
11438 
11439 VkResult VmaBlockVector::CreateMinBlocks()
11440 {
11441  for(size_t i = 0; i < m_MinBlockCount; ++i)
11442  {
11443  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11444  if(res != VK_SUCCESS)
11445  {
11446  return res;
11447  }
11448  }
11449  return VK_SUCCESS;
11450 }
11451 
11452 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11453 {
11454  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11455 
11456  const size_t blockCount = m_Blocks.size();
11457 
11458  pStats->size = 0;
11459  pStats->unusedSize = 0;
11460  pStats->allocationCount = 0;
11461  pStats->unusedRangeCount = 0;
11462  pStats->unusedRangeSizeMax = 0;
11463  pStats->blockCount = blockCount;
11464 
11465  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11466  {
11467  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11468  VMA_ASSERT(pBlock);
11469  VMA_HEAVY_ASSERT(pBlock->Validate());
11470  pBlock->m_pMetadata->AddPoolStats(*pStats);
11471  }
11472 }
11473 
11474 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11475 {
11476  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11477  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11478  (VMA_DEBUG_MARGIN > 0) &&
11479  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11480  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11481 }
11482 
11483 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11484 
11485 VkResult VmaBlockVector::Allocate(
11486  uint32_t currentFrameIndex,
11487  VkDeviceSize size,
11488  VkDeviceSize alignment,
11489  const VmaAllocationCreateInfo& createInfo,
11490  VmaSuballocationType suballocType,
11491  size_t allocationCount,
11492  VmaAllocation* pAllocations)
11493 {
11494  size_t allocIndex;
11495  VkResult res = VK_SUCCESS;
11496 
11497  if(IsCorruptionDetectionEnabled())
11498  {
11499  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11500  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11501  }
11502 
11503  {
11504  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11505  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11506  {
11507  res = AllocatePage(
11508  currentFrameIndex,
11509  size,
11510  alignment,
11511  createInfo,
11512  suballocType,
11513  pAllocations + allocIndex);
11514  if(res != VK_SUCCESS)
11515  {
11516  break;
11517  }
11518  }
11519  }
11520 
11521  if(res != VK_SUCCESS)
11522  {
11523  // Free all already created allocations.
11524  while(allocIndex--)
11525  {
11526  Free(pAllocations[allocIndex]);
11527  }
11528  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11529  }
11530 
11531  return res;
11532 }
11533 
11534 VkResult VmaBlockVector::AllocatePage(
11535  uint32_t currentFrameIndex,
11536  VkDeviceSize size,
11537  VkDeviceSize alignment,
11538  const VmaAllocationCreateInfo& createInfo,
11539  VmaSuballocationType suballocType,
11540  VmaAllocation* pAllocation)
11541 {
11542  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11543  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11544  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11545  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11546  const bool canCreateNewBlock =
11547  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11548  (m_Blocks.size() < m_MaxBlockCount);
11549  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11550 
11551  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11552  // Which in turn is available only when maxBlockCount = 1.
11553  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11554  {
11555  canMakeOtherLost = false;
11556  }
11557 
11558  // Upper address can only be used with linear allocator and within single memory block.
11559  if(isUpperAddress &&
11560  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11561  {
11562  return VK_ERROR_FEATURE_NOT_PRESENT;
11563  }
11564 
11565  // Validate strategy.
11566  switch(strategy)
11567  {
11568  case 0:
11570  break;
11574  break;
11575  default:
11576  return VK_ERROR_FEATURE_NOT_PRESENT;
11577  }
11578 
11579  // Early reject: requested allocation size is larger that maximum block size for this block vector.
11580  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11581  {
11582  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11583  }
11584 
11585  /*
11586  Under certain condition, this whole section can be skipped for optimization, so
11587  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11588  e.g. for custom pools with linear algorithm.
11589  */
11590  if(!canMakeOtherLost || canCreateNewBlock)
11591  {
11592  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11593  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11595 
11596  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11597  {
11598  // Use only last block.
11599  if(!m_Blocks.empty())
11600  {
11601  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11602  VMA_ASSERT(pCurrBlock);
11603  VkResult res = AllocateFromBlock(
11604  pCurrBlock,
11605  currentFrameIndex,
11606  size,
11607  alignment,
11608  allocFlagsCopy,
11609  createInfo.pUserData,
11610  suballocType,
11611  strategy,
11612  pAllocation);
11613  if(res == VK_SUCCESS)
11614  {
11615  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11616  return VK_SUCCESS;
11617  }
11618  }
11619  }
11620  else
11621  {
11623  {
11624  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11625  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11626  {
11627  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11628  VMA_ASSERT(pCurrBlock);
11629  VkResult res = AllocateFromBlock(
11630  pCurrBlock,
11631  currentFrameIndex,
11632  size,
11633  alignment,
11634  allocFlagsCopy,
11635  createInfo.pUserData,
11636  suballocType,
11637  strategy,
11638  pAllocation);
11639  if(res == VK_SUCCESS)
11640  {
11641  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11642  return VK_SUCCESS;
11643  }
11644  }
11645  }
11646  else // WORST_FIT, FIRST_FIT
11647  {
11648  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11649  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11650  {
11651  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11652  VMA_ASSERT(pCurrBlock);
11653  VkResult res = AllocateFromBlock(
11654  pCurrBlock,
11655  currentFrameIndex,
11656  size,
11657  alignment,
11658  allocFlagsCopy,
11659  createInfo.pUserData,
11660  suballocType,
11661  strategy,
11662  pAllocation);
11663  if(res == VK_SUCCESS)
11664  {
11665  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11666  return VK_SUCCESS;
11667  }
11668  }
11669  }
11670  }
11671 
11672  // 2. Try to create new block.
11673  if(canCreateNewBlock)
11674  {
11675  // Calculate optimal size for new block.
11676  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11677  uint32_t newBlockSizeShift = 0;
11678  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11679 
11680  if(!m_ExplicitBlockSize)
11681  {
11682  // Allocate 1/8, 1/4, 1/2 as first blocks.
11683  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11684  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11685  {
11686  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11687  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11688  {
11689  newBlockSize = smallerNewBlockSize;
11690  ++newBlockSizeShift;
11691  }
11692  else
11693  {
11694  break;
11695  }
11696  }
11697  }
11698 
11699  size_t newBlockIndex = 0;
11700  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11701  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11702  if(!m_ExplicitBlockSize)
11703  {
11704  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11705  {
11706  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11707  if(smallerNewBlockSize >= size)
11708  {
11709  newBlockSize = smallerNewBlockSize;
11710  ++newBlockSizeShift;
11711  res = CreateBlock(newBlockSize, &newBlockIndex);
11712  }
11713  else
11714  {
11715  break;
11716  }
11717  }
11718  }
11719 
11720  if(res == VK_SUCCESS)
11721  {
11722  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11723  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11724 
11725  res = AllocateFromBlock(
11726  pBlock,
11727  currentFrameIndex,
11728  size,
11729  alignment,
11730  allocFlagsCopy,
11731  createInfo.pUserData,
11732  suballocType,
11733  strategy,
11734  pAllocation);
11735  if(res == VK_SUCCESS)
11736  {
11737  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11738  return VK_SUCCESS;
11739  }
11740  else
11741  {
11742  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11743  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11744  }
11745  }
11746  }
11747  }
11748 
11749  // 3. Try to allocate from existing blocks with making other allocations lost.
11750  if(canMakeOtherLost)
11751  {
11752  uint32_t tryIndex = 0;
11753  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11754  {
11755  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11756  VmaAllocationRequest bestRequest = {};
11757  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11758 
11759  // 1. Search existing allocations.
11761  {
11762  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11763  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11764  {
11765  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11766  VMA_ASSERT(pCurrBlock);
11767  VmaAllocationRequest currRequest = {};
11768  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11769  currentFrameIndex,
11770  m_FrameInUseCount,
11771  m_BufferImageGranularity,
11772  size,
11773  alignment,
11774  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11775  suballocType,
11776  canMakeOtherLost,
11777  strategy,
11778  &currRequest))
11779  {
11780  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11781  if(pBestRequestBlock == VMA_NULL ||
11782  currRequestCost < bestRequestCost)
11783  {
11784  pBestRequestBlock = pCurrBlock;
11785  bestRequest = currRequest;
11786  bestRequestCost = currRequestCost;
11787 
11788  if(bestRequestCost == 0)
11789  {
11790  break;
11791  }
11792  }
11793  }
11794  }
11795  }
11796  else // WORST_FIT, FIRST_FIT
11797  {
11798  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11799  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11800  {
11801  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11802  VMA_ASSERT(pCurrBlock);
11803  VmaAllocationRequest currRequest = {};
11804  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11805  currentFrameIndex,
11806  m_FrameInUseCount,
11807  m_BufferImageGranularity,
11808  size,
11809  alignment,
11810  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11811  suballocType,
11812  canMakeOtherLost,
11813  strategy,
11814  &currRequest))
11815  {
11816  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11817  if(pBestRequestBlock == VMA_NULL ||
11818  currRequestCost < bestRequestCost ||
11820  {
11821  pBestRequestBlock = pCurrBlock;
11822  bestRequest = currRequest;
11823  bestRequestCost = currRequestCost;
11824 
11825  if(bestRequestCost == 0 ||
11827  {
11828  break;
11829  }
11830  }
11831  }
11832  }
11833  }
11834 
11835  if(pBestRequestBlock != VMA_NULL)
11836  {
11837  if(mapped)
11838  {
11839  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11840  if(res != VK_SUCCESS)
11841  {
11842  return res;
11843  }
11844  }
11845 
11846  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11847  currentFrameIndex,
11848  m_FrameInUseCount,
11849  &bestRequest))
11850  {
11851  // We no longer have an empty Allocation.
11852  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11853  {
11854  m_HasEmptyBlock = false;
11855  }
11856  // Allocate from this pBlock.
11857  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11858  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11859  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11860  (*pAllocation)->InitBlockAllocation(
11861  pBestRequestBlock,
11862  bestRequest.offset,
11863  alignment,
11864  size,
11865  suballocType,
11866  mapped,
11867  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11868  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11869  VMA_DEBUG_LOG(" Returned from existing block");
11870  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11871  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11872  {
11873  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11874  }
11875  if(IsCorruptionDetectionEnabled())
11876  {
11877  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11878  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11879  }
11880  return VK_SUCCESS;
11881  }
11882  // else: Some allocations must have been touched while we are here. Next try.
11883  }
11884  else
11885  {
11886  // Could not find place in any of the blocks - break outer loop.
11887  break;
11888  }
11889  }
11890  /* Maximum number of tries exceeded - a very unlike event when many other
11891  threads are simultaneously touching allocations making it impossible to make
11892  lost at the same time as we try to allocate. */
11893  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11894  {
11895  return VK_ERROR_TOO_MANY_OBJECTS;
11896  }
11897  }
11898 
11899  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11900 }
11901 
11902 void VmaBlockVector::Free(
11903  VmaAllocation hAllocation)
11904 {
11905  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11906 
11907  // Scope for lock.
11908  {
11909  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11910 
11911  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11912 
11913  if(IsCorruptionDetectionEnabled())
11914  {
11915  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11916  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11917  }
11918 
11919  if(hAllocation->IsPersistentMap())
11920  {
11921  pBlock->Unmap(m_hAllocator, 1);
11922  }
11923 
11924  pBlock->m_pMetadata->Free(hAllocation);
11925  VMA_HEAVY_ASSERT(pBlock->Validate());
11926 
11927  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11928 
11929  // pBlock became empty after this deallocation.
11930  if(pBlock->m_pMetadata->IsEmpty())
11931  {
11932  // Already has empty Allocation. We don't want to have two, so delete this one.
11933  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11934  {
11935  pBlockToDelete = pBlock;
11936  Remove(pBlock);
11937  }
11938  // We now have first empty block.
11939  else
11940  {
11941  m_HasEmptyBlock = true;
11942  }
11943  }
11944  // pBlock didn't become empty, but we have another empty block - find and free that one.
11945  // (This is optional, heuristics.)
11946  else if(m_HasEmptyBlock)
11947  {
11948  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11949  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11950  {
11951  pBlockToDelete = pLastBlock;
11952  m_Blocks.pop_back();
11953  m_HasEmptyBlock = false;
11954  }
11955  }
11956 
11957  IncrementallySortBlocks();
11958  }
11959 
11960  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11961  // lock, for performance reason.
11962  if(pBlockToDelete != VMA_NULL)
11963  {
11964  VMA_DEBUG_LOG(" Deleted empty allocation");
11965  pBlockToDelete->Destroy(m_hAllocator);
11966  vma_delete(m_hAllocator, pBlockToDelete);
11967  }
11968 }
11969 
11970 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11971 {
11972  VkDeviceSize result = 0;
11973  for(size_t i = m_Blocks.size(); i--; )
11974  {
11975  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11976  if(result >= m_PreferredBlockSize)
11977  {
11978  break;
11979  }
11980  }
11981  return result;
11982 }
11983 
11984 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11985 {
11986  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11987  {
11988  if(m_Blocks[blockIndex] == pBlock)
11989  {
11990  VmaVectorRemove(m_Blocks, blockIndex);
11991  return;
11992  }
11993  }
11994  VMA_ASSERT(0);
11995 }
11996 
11997 void VmaBlockVector::IncrementallySortBlocks()
11998 {
11999  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12000  {
12001  // Bubble sort only until first swap.
12002  for(size_t i = 1; i < m_Blocks.size(); ++i)
12003  {
12004  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12005  {
12006  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12007  return;
12008  }
12009  }
12010  }
12011 }
12012 
12013 VkResult VmaBlockVector::AllocateFromBlock(
12014  VmaDeviceMemoryBlock* pBlock,
12015  uint32_t currentFrameIndex,
12016  VkDeviceSize size,
12017  VkDeviceSize alignment,
12018  VmaAllocationCreateFlags allocFlags,
12019  void* pUserData,
12020  VmaSuballocationType suballocType,
12021  uint32_t strategy,
12022  VmaAllocation* pAllocation)
12023 {
12024  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12025  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12026  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12027  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12028 
12029  VmaAllocationRequest currRequest = {};
12030  if(pBlock->m_pMetadata->CreateAllocationRequest(
12031  currentFrameIndex,
12032  m_FrameInUseCount,
12033  m_BufferImageGranularity,
12034  size,
12035  alignment,
12036  isUpperAddress,
12037  suballocType,
12038  false, // canMakeOtherLost
12039  strategy,
12040  &currRequest))
12041  {
12042  // Allocate from pCurrBlock.
12043  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12044 
12045  if(mapped)
12046  {
12047  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12048  if(res != VK_SUCCESS)
12049  {
12050  return res;
12051  }
12052  }
12053 
12054  // We no longer have an empty Allocation.
12055  if(pBlock->m_pMetadata->IsEmpty())
12056  {
12057  m_HasEmptyBlock = false;
12058  }
12059 
12060  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12061  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12062  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12063  (*pAllocation)->InitBlockAllocation(
12064  pBlock,
12065  currRequest.offset,
12066  alignment,
12067  size,
12068  suballocType,
12069  mapped,
12070  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12071  VMA_HEAVY_ASSERT(pBlock->Validate());
12072  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12073  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12074  {
12075  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12076  }
12077  if(IsCorruptionDetectionEnabled())
12078  {
12079  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12080  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12081  }
12082  return VK_SUCCESS;
12083  }
12084  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12085 }
12086 
12087 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12088 {
12089  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12090  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12091  allocInfo.allocationSize = blockSize;
12092  VkDeviceMemory mem = VK_NULL_HANDLE;
12093  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12094  if(res < 0)
12095  {
12096  return res;
12097  }
12098 
12099  // New VkDeviceMemory successfully created.
12100 
12101  // Create new Allocation for it.
12102  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12103  pBlock->Init(
12104  m_hAllocator,
12105  m_hParentPool,
12106  m_MemoryTypeIndex,
12107  mem,
12108  allocInfo.allocationSize,
12109  m_NextBlockId++,
12110  m_Algorithm);
12111 
12112  m_Blocks.push_back(pBlock);
12113  if(pNewBlockIndex != VMA_NULL)
12114  {
12115  *pNewBlockIndex = m_Blocks.size() - 1;
12116  }
12117 
12118  return VK_SUCCESS;
12119 }
12120 
12121 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12122  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12123  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12124 {
12125  const size_t blockCount = m_Blocks.size();
12126  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12127 
12128  enum BLOCK_FLAG
12129  {
12130  BLOCK_FLAG_USED = 0x00000001,
12131  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12132  };
12133 
12134  struct BlockInfo
12135  {
12136  uint32_t flags;
12137  void* pMappedData;
12138  };
12139  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12140  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12141  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12142 
12143  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12144  const size_t moveCount = moves.size();
12145  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12146  {
12147  const VmaDefragmentationMove& move = moves[moveIndex];
12148  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12149  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12150  }
12151 
12152  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12153 
12154  // Go over all blocks. Get mapped pointer or map if necessary.
12155  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12156  {
12157  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12158  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12159  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12160  {
12161  currBlockInfo.pMappedData = pBlock->GetMappedData();
12162  // It is not originally mapped - map it.
12163  if(currBlockInfo.pMappedData == VMA_NULL)
12164  {
12165  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12166  if(pDefragCtx->res == VK_SUCCESS)
12167  {
12168  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12169  }
12170  }
12171  }
12172  }
12173 
12174  // Go over all moves. Do actual data transfer.
12175  if(pDefragCtx->res == VK_SUCCESS)
12176  {
12177  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12178  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12179 
12180  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12181  {
12182  const VmaDefragmentationMove& move = moves[moveIndex];
12183 
12184  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12185  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12186 
12187  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12188 
12189  // Invalidate source.
12190  if(isNonCoherent)
12191  {
12192  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12193  memRange.memory = pSrcBlock->GetDeviceMemory();
12194  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12195  memRange.size = VMA_MIN(
12196  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12197  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12198  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12199  }
12200 
12201  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12202  memmove(
12203  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12204  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12205  static_cast<size_t>(move.size));
12206 
12207  if(IsCorruptionDetectionEnabled())
12208  {
12209  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12210  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12211  }
12212 
12213  // Flush destination.
12214  if(isNonCoherent)
12215  {
12216  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12217  memRange.memory = pDstBlock->GetDeviceMemory();
12218  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12219  memRange.size = VMA_MIN(
12220  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12221  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12222  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12223  }
12224  }
12225  }
12226 
12227  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12228  // Regardless of pCtx->res == VK_SUCCESS.
12229  for(size_t blockIndex = blockCount; blockIndex--; )
12230  {
12231  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12232  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12233  {
12234  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12235  pBlock->Unmap(m_hAllocator, 1);
12236  }
12237  }
12238 }
12239 
12240 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12241  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12242  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12243  VkCommandBuffer commandBuffer)
12244 {
12245  const size_t blockCount = m_Blocks.size();
12246 
12247  pDefragCtx->blockContexts.resize(blockCount);
12248  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12249 
12250  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12251  const size_t moveCount = moves.size();
12252  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12253  {
12254  const VmaDefragmentationMove& move = moves[moveIndex];
12255  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12256  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12257  }
12258 
12259  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12260 
12261  // Go over all blocks. Create and bind buffer for whole block if necessary.
12262  {
12263  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12264  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12265  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12266 
12267  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12268  {
12269  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12270  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12271  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12272  {
12273  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12274  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12275  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12276  if(pDefragCtx->res == VK_SUCCESS)
12277  {
12278  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12279  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12280  }
12281  }
12282  }
12283  }
12284 
12285  // Go over all moves. Post data transfer commands to command buffer.
12286  if(pDefragCtx->res == VK_SUCCESS)
12287  {
12288  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12289  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12290 
12291  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12292  {
12293  const VmaDefragmentationMove& move = moves[moveIndex];
12294 
12295  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12296  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12297 
12298  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12299 
12300  VkBufferCopy region = {
12301  move.srcOffset,
12302  move.dstOffset,
12303  move.size };
12304  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12305  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12306  }
12307  }
12308 
12309  // Save buffers to defrag context for later destruction.
12310  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12311  {
12312  pDefragCtx->res = VK_NOT_READY;
12313  }
12314 }
12315 
12316 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12317 {
12318  m_HasEmptyBlock = false;
12319  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12320  {
12321  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12322  if(pBlock->m_pMetadata->IsEmpty())
12323  {
12324  if(m_Blocks.size() > m_MinBlockCount)
12325  {
12326  if(pDefragmentationStats != VMA_NULL)
12327  {
12328  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12329  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12330  }
12331 
12332  VmaVectorRemove(m_Blocks, blockIndex);
12333  pBlock->Destroy(m_hAllocator);
12334  vma_delete(m_hAllocator, pBlock);
12335  }
12336  else
12337  {
12338  m_HasEmptyBlock = true;
12339  }
12340  }
12341  }
12342 }
12343 
12344 #if VMA_STATS_STRING_ENABLED
12345 
12346 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12347 {
12348  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12349 
12350  json.BeginObject();
12351 
12352  if(m_IsCustomPool)
12353  {
12354  json.WriteString("MemoryTypeIndex");
12355  json.WriteNumber(m_MemoryTypeIndex);
12356 
12357  json.WriteString("BlockSize");
12358  json.WriteNumber(m_PreferredBlockSize);
12359 
12360  json.WriteString("BlockCount");
12361  json.BeginObject(true);
12362  if(m_MinBlockCount > 0)
12363  {
12364  json.WriteString("Min");
12365  json.WriteNumber((uint64_t)m_MinBlockCount);
12366  }
12367  if(m_MaxBlockCount < SIZE_MAX)
12368  {
12369  json.WriteString("Max");
12370  json.WriteNumber((uint64_t)m_MaxBlockCount);
12371  }
12372  json.WriteString("Cur");
12373  json.WriteNumber((uint64_t)m_Blocks.size());
12374  json.EndObject();
12375 
12376  if(m_FrameInUseCount > 0)
12377  {
12378  json.WriteString("FrameInUseCount");
12379  json.WriteNumber(m_FrameInUseCount);
12380  }
12381 
12382  if(m_Algorithm != 0)
12383  {
12384  json.WriteString("Algorithm");
12385  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12386  }
12387  }
12388  else
12389  {
12390  json.WriteString("PreferredBlockSize");
12391  json.WriteNumber(m_PreferredBlockSize);
12392  }
12393 
12394  json.WriteString("Blocks");
12395  json.BeginObject();
12396  for(size_t i = 0; i < m_Blocks.size(); ++i)
12397  {
12398  json.BeginString();
12399  json.ContinueString(m_Blocks[i]->GetId());
12400  json.EndString();
12401 
12402  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12403  }
12404  json.EndObject();
12405 
12406  json.EndObject();
12407 }
12408 
12409 #endif // #if VMA_STATS_STRING_ENABLED
12410 
12411 void VmaBlockVector::Defragment(
12412  class VmaBlockVectorDefragmentationContext* pCtx,
12413  VmaDefragmentationStats* pStats,
12414  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12415  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12416  VkCommandBuffer commandBuffer)
12417 {
12418  pCtx->res = VK_SUCCESS;
12419 
12420  const VkMemoryPropertyFlags memPropFlags =
12421  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12422  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12423  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12424 
12425  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12426  isHostVisible;
12427  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12428  !IsCorruptionDetectionEnabled();
12429 
12430  // There are options to defragment this memory type.
12431  if(canDefragmentOnCpu || canDefragmentOnGpu)
12432  {
12433  bool defragmentOnGpu;
12434  // There is only one option to defragment this memory type.
12435  if(canDefragmentOnGpu != canDefragmentOnCpu)
12436  {
12437  defragmentOnGpu = canDefragmentOnGpu;
12438  }
12439  // Both options are available: Heuristics to choose the best one.
12440  else
12441  {
12442  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12443  m_hAllocator->IsIntegratedGpu();
12444  }
12445 
12446  bool overlappingMoveSupported = !defragmentOnGpu;
12447 
12448  if(m_hAllocator->m_UseMutex)
12449  {
12450  m_Mutex.LockWrite();
12451  pCtx->mutexLocked = true;
12452  }
12453 
12454  pCtx->Begin(overlappingMoveSupported);
12455 
12456  // Defragment.
12457 
12458  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12459  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12460  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12461  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12462  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12463 
12464  // Accumulate statistics.
12465  if(pStats != VMA_NULL)
12466  {
12467  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12468  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12469  pStats->bytesMoved += bytesMoved;
12470  pStats->allocationsMoved += allocationsMoved;
12471  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12472  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12473  if(defragmentOnGpu)
12474  {
12475  maxGpuBytesToMove -= bytesMoved;
12476  maxGpuAllocationsToMove -= allocationsMoved;
12477  }
12478  else
12479  {
12480  maxCpuBytesToMove -= bytesMoved;
12481  maxCpuAllocationsToMove -= allocationsMoved;
12482  }
12483  }
12484 
12485  if(pCtx->res >= VK_SUCCESS)
12486  {
12487  if(defragmentOnGpu)
12488  {
12489  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12490  }
12491  else
12492  {
12493  ApplyDefragmentationMovesCpu(pCtx, moves);
12494  }
12495  }
12496  }
12497 }
12498 
12499 void VmaBlockVector::DefragmentationEnd(
12500  class VmaBlockVectorDefragmentationContext* pCtx,
12501  VmaDefragmentationStats* pStats)
12502 {
12503  // Destroy buffers.
12504  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12505  {
12506  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12507  if(blockCtx.hBuffer)
12508  {
12509  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12510  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12511  }
12512  }
12513 
12514  if(pCtx->res >= VK_SUCCESS)
12515  {
12516  FreeEmptyBlocks(pStats);
12517  }
12518 
12519  if(pCtx->mutexLocked)
12520  {
12521  VMA_ASSERT(m_hAllocator->m_UseMutex);
12522  m_Mutex.UnlockWrite();
12523  }
12524 }
12525 
12526 size_t VmaBlockVector::CalcAllocationCount() const
12527 {
12528  size_t result = 0;
12529  for(size_t i = 0; i < m_Blocks.size(); ++i)
12530  {
12531  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12532  }
12533  return result;
12534 }
12535 
12536 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12537 {
12538  if(m_BufferImageGranularity == 1)
12539  {
12540  return false;
12541  }
12542  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12543  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12544  {
12545  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12546  VMA_ASSERT(m_Algorithm == 0);
12547  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12548  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12549  {
12550  return true;
12551  }
12552  }
12553  return false;
12554 }
12555 
12556 void VmaBlockVector::MakePoolAllocationsLost(
12557  uint32_t currentFrameIndex,
12558  size_t* pLostAllocationCount)
12559 {
12560  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12561  size_t lostAllocationCount = 0;
12562  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12563  {
12564  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12565  VMA_ASSERT(pBlock);
12566  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12567  }
12568  if(pLostAllocationCount != VMA_NULL)
12569  {
12570  *pLostAllocationCount = lostAllocationCount;
12571  }
12572 }
12573 
12574 VkResult VmaBlockVector::CheckCorruption()
12575 {
12576  if(!IsCorruptionDetectionEnabled())
12577  {
12578  return VK_ERROR_FEATURE_NOT_PRESENT;
12579  }
12580 
12581  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12582  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12583  {
12584  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12585  VMA_ASSERT(pBlock);
12586  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12587  if(res != VK_SUCCESS)
12588  {
12589  return res;
12590  }
12591  }
12592  return VK_SUCCESS;
12593 }
12594 
12595 void VmaBlockVector::AddStats(VmaStats* pStats)
12596 {
12597  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12598  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12599 
12600  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12601 
12602  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12603  {
12604  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12605  VMA_ASSERT(pBlock);
12606  VMA_HEAVY_ASSERT(pBlock->Validate());
12607  VmaStatInfo allocationStatInfo;
12608  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12609  VmaAddStatInfo(pStats->total, allocationStatInfo);
12610  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12611  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12612  }
12613 }
12614 
12616 // VmaDefragmentationAlgorithm_Generic members definition
12617 
12618 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12619  VmaAllocator hAllocator,
12620  VmaBlockVector* pBlockVector,
12621  uint32_t currentFrameIndex,
12622  bool overlappingMoveSupported) :
12623  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12624  m_AllocationCount(0),
12625  m_AllAllocations(false),
12626  m_BytesMoved(0),
12627  m_AllocationsMoved(0),
12628  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12629 {
12630  // Create block info for each block.
12631  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12632  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12633  {
12634  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12635  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12636  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12637  m_Blocks.push_back(pBlockInfo);
12638  }
12639 
12640  // Sort them by m_pBlock pointer value.
12641  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12642 }
12643 
12644 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12645 {
12646  for(size_t i = m_Blocks.size(); i--; )
12647  {
12648  vma_delete(m_hAllocator, m_Blocks[i]);
12649  }
12650 }
12651 
12652 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12653 {
12654  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
12655  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12656  {
12657  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12658  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12659  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12660  {
12661  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12662  (*it)->m_Allocations.push_back(allocInfo);
12663  }
12664  else
12665  {
12666  VMA_ASSERT(0);
12667  }
12668 
12669  ++m_AllocationCount;
12670  }
12671 }
12672 
12673 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12674  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12675  VkDeviceSize maxBytesToMove,
12676  uint32_t maxAllocationsToMove)
12677 {
12678  if(m_Blocks.empty())
12679  {
12680  return VK_SUCCESS;
12681  }
12682 
12683  // This is a choice based on research.
12684  // Option 1:
12685  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12686  // Option 2:
12687  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12688  // Option 3:
12689  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12690 
12691  size_t srcBlockMinIndex = 0;
12692  // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
12693  /*
12694  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12695  {
12696  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12697  if(blocksWithNonMovableCount > 0)
12698  {
12699  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12700  }
12701  }
12702  */
12703 
12704  size_t srcBlockIndex = m_Blocks.size() - 1;
12705  size_t srcAllocIndex = SIZE_MAX;
12706  for(;;)
12707  {
12708  // 1. Find next allocation to move.
12709  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12710  // 1.2. Then start from last to first m_Allocations.
12711  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12712  {
12713  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12714  {
12715  // Finished: no more allocations to process.
12716  if(srcBlockIndex == srcBlockMinIndex)
12717  {
12718  return VK_SUCCESS;
12719  }
12720  else
12721  {
12722  --srcBlockIndex;
12723  srcAllocIndex = SIZE_MAX;
12724  }
12725  }
12726  else
12727  {
12728  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12729  }
12730  }
12731 
12732  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12733  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12734 
12735  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12736  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12737  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12738  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12739 
12740  // 2. Try to find new place for this allocation in preceding or current block.
12741  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12742  {
12743  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12744  VmaAllocationRequest dstAllocRequest;
12745  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12746  m_CurrentFrameIndex,
12747  m_pBlockVector->GetFrameInUseCount(),
12748  m_pBlockVector->GetBufferImageGranularity(),
12749  size,
12750  alignment,
12751  false, // upperAddress
12752  suballocType,
12753  false, // canMakeOtherLost
12754  strategy,
12755  &dstAllocRequest) &&
12756  MoveMakesSense(
12757  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12758  {
12759  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12760 
12761  // Reached limit on number of allocations or bytes to move.
12762  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12763  (m_BytesMoved + size > maxBytesToMove))
12764  {
12765  return VK_SUCCESS;
12766  }
12767 
12768  VmaDefragmentationMove move;
12769  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12770  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12771  move.srcOffset = srcOffset;
12772  move.dstOffset = dstAllocRequest.offset;
12773  move.size = size;
12774  moves.push_back(move);
12775 
12776  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12777  dstAllocRequest,
12778  suballocType,
12779  size,
12780  allocInfo.m_hAllocation);
12781  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12782 
12783  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12784 
12785  if(allocInfo.m_pChanged != VMA_NULL)
12786  {
12787  *allocInfo.m_pChanged = VK_TRUE;
12788  }
12789 
12790  ++m_AllocationsMoved;
12791  m_BytesMoved += size;
12792 
12793  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12794 
12795  break;
12796  }
12797  }
12798 
12799  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12800 
12801  if(srcAllocIndex > 0)
12802  {
12803  --srcAllocIndex;
12804  }
12805  else
12806  {
12807  if(srcBlockIndex > 0)
12808  {
12809  --srcBlockIndex;
12810  srcAllocIndex = SIZE_MAX;
12811  }
12812  else
12813  {
12814  return VK_SUCCESS;
12815  }
12816  }
12817  }
12818 }
12819 
12820 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12821 {
12822  size_t result = 0;
12823  for(size_t i = 0; i < m_Blocks.size(); ++i)
12824  {
12825  if(m_Blocks[i]->m_HasNonMovableAllocations)
12826  {
12827  ++result;
12828  }
12829  }
12830  return result;
12831 }
12832 
12833 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12834  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12835  VkDeviceSize maxBytesToMove,
12836  uint32_t maxAllocationsToMove)
12837 {
12838  if(!m_AllAllocations && m_AllocationCount == 0)
12839  {
12840  return VK_SUCCESS;
12841  }
12842 
12843  const size_t blockCount = m_Blocks.size();
12844  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12845  {
12846  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12847 
12848  if(m_AllAllocations)
12849  {
12850  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12851  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12852  it != pMetadata->m_Suballocations.end();
12853  ++it)
12854  {
12855  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12856  {
12857  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12858  pBlockInfo->m_Allocations.push_back(allocInfo);
12859  }
12860  }
12861  }
12862 
12863  pBlockInfo->CalcHasNonMovableAllocations();
12864 
12865  // This is a choice based on research.
12866  // Option 1:
12867  pBlockInfo->SortAllocationsByOffsetDescending();
12868  // Option 2:
12869  //pBlockInfo->SortAllocationsBySizeDescending();
12870  }
12871 
12872  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
12873  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12874 
12875  // This is a choice based on research.
12876  const uint32_t roundCount = 2;
12877 
12878  // Execute defragmentation rounds (the main part).
12879  VkResult result = VK_SUCCESS;
12880  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12881  {
12882  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12883  }
12884 
12885  return result;
12886 }
12887 
12888 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12889  size_t dstBlockIndex, VkDeviceSize dstOffset,
12890  size_t srcBlockIndex, VkDeviceSize srcOffset)
12891 {
12892  if(dstBlockIndex < srcBlockIndex)
12893  {
12894  return true;
12895  }
12896  if(dstBlockIndex > srcBlockIndex)
12897  {
12898  return false;
12899  }
12900  if(dstOffset < srcOffset)
12901  {
12902  return true;
12903  }
12904  return false;
12905 }
12906 
12908 // VmaDefragmentationAlgorithm_Fast
12909 
12910 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12911  VmaAllocator hAllocator,
12912  VmaBlockVector* pBlockVector,
12913  uint32_t currentFrameIndex,
12914  bool overlappingMoveSupported) :
12915  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12916  m_OverlappingMoveSupported(overlappingMoveSupported),
12917  m_AllocationCount(0),
12918  m_AllAllocations(false),
12919  m_BytesMoved(0),
12920  m_AllocationsMoved(0),
12921  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12922 {
12923  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12924 
12925 }
12926 
12927 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12928 {
12929 }
12930 
12931 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12932  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12933  VkDeviceSize maxBytesToMove,
12934  uint32_t maxAllocationsToMove)
12935 {
12936  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12937 
12938  const size_t blockCount = m_pBlockVector->GetBlockCount();
12939  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12940  {
12941  return VK_SUCCESS;
12942  }
12943 
12944  PreprocessMetadata();
12945 
12946  // Sort blocks in order from most destination.
12947 
12948  m_BlockInfos.resize(blockCount);
12949  for(size_t i = 0; i < blockCount; ++i)
12950  {
12951  m_BlockInfos[i].origBlockIndex = i;
12952  }
12953 
12954  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12955  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12956  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12957  });
12958 
12959  // THE MAIN ALGORITHM
12960 
12961  FreeSpaceDatabase freeSpaceDb;
12962 
12963  size_t dstBlockInfoIndex = 0;
12964  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12965  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12966  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12967  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12968  VkDeviceSize dstOffset = 0;
12969 
12970  bool end = false;
12971  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12972  {
12973  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12974  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12975  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12976  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12977  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12978  {
12979  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12980  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12981  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12982  if(m_AllocationsMoved == maxAllocationsToMove ||
12983  m_BytesMoved + srcAllocSize > maxBytesToMove)
12984  {
12985  end = true;
12986  break;
12987  }
12988  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12989 
12990  // Try to place it in one of free spaces from the database.
12991  size_t freeSpaceInfoIndex;
12992  VkDeviceSize dstAllocOffset;
12993  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12994  freeSpaceInfoIndex, dstAllocOffset))
12995  {
12996  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12997  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12998  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12999 
13000  // Same block
13001  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13002  {
13003  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13004 
13005  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13006 
13007  VmaSuballocation suballoc = *srcSuballocIt;
13008  suballoc.offset = dstAllocOffset;
13009  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13010  m_BytesMoved += srcAllocSize;
13011  ++m_AllocationsMoved;
13012 
13013  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13014  ++nextSuballocIt;
13015  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13016  srcSuballocIt = nextSuballocIt;
13017 
13018  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13019 
13020  VmaDefragmentationMove move = {
13021  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13022  srcAllocOffset, dstAllocOffset,
13023  srcAllocSize };
13024  moves.push_back(move);
13025  }
13026  // Different block
13027  else
13028  {
13029  // MOVE OPTION 2: Move the allocation to a different block.
13030 
13031  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13032 
13033  VmaSuballocation suballoc = *srcSuballocIt;
13034  suballoc.offset = dstAllocOffset;
13035  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13036  m_BytesMoved += srcAllocSize;
13037  ++m_AllocationsMoved;
13038 
13039  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13040  ++nextSuballocIt;
13041  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13042  srcSuballocIt = nextSuballocIt;
13043 
13044  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13045 
13046  VmaDefragmentationMove move = {
13047  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13048  srcAllocOffset, dstAllocOffset,
13049  srcAllocSize };
13050  moves.push_back(move);
13051  }
13052  }
13053  else
13054  {
13055  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13056 
13057  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
13058  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13059  dstAllocOffset + srcAllocSize > dstBlockSize)
13060  {
13061  // But before that, register remaining free space at the end of dst block.
13062  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13063 
13064  ++dstBlockInfoIndex;
13065  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13066  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13067  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13068  dstBlockSize = pDstMetadata->GetSize();
13069  dstOffset = 0;
13070  dstAllocOffset = 0;
13071  }
13072 
13073  // Same block
13074  if(dstBlockInfoIndex == srcBlockInfoIndex)
13075  {
13076  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13077 
13078  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13079 
13080  bool skipOver = overlap;
13081  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13082  {
13083  // If destination and source place overlap, skip if it would move it
13084  // by only < 1/64 of its size.
13085  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13086  }
13087 
13088  if(skipOver)
13089  {
13090  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13091 
13092  dstOffset = srcAllocOffset + srcAllocSize;
13093  ++srcSuballocIt;
13094  }
13095  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13096  else
13097  {
13098  srcSuballocIt->offset = dstAllocOffset;
13099  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13100  dstOffset = dstAllocOffset + srcAllocSize;
13101  m_BytesMoved += srcAllocSize;
13102  ++m_AllocationsMoved;
13103  ++srcSuballocIt;
13104  VmaDefragmentationMove move = {
13105  srcOrigBlockIndex, dstOrigBlockIndex,
13106  srcAllocOffset, dstAllocOffset,
13107  srcAllocSize };
13108  moves.push_back(move);
13109  }
13110  }
13111  // Different block
13112  else
13113  {
13114  // MOVE OPTION 2: Move the allocation to a different block.
13115 
13116  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13117  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13118 
13119  VmaSuballocation suballoc = *srcSuballocIt;
13120  suballoc.offset = dstAllocOffset;
13121  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13122  dstOffset = dstAllocOffset + srcAllocSize;
13123  m_BytesMoved += srcAllocSize;
13124  ++m_AllocationsMoved;
13125 
13126  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13127  ++nextSuballocIt;
13128  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13129  srcSuballocIt = nextSuballocIt;
13130 
13131  pDstMetadata->m_Suballocations.push_back(suballoc);
13132 
13133  VmaDefragmentationMove move = {
13134  srcOrigBlockIndex, dstOrigBlockIndex,
13135  srcAllocOffset, dstAllocOffset,
13136  srcAllocSize };
13137  moves.push_back(move);
13138  }
13139  }
13140  }
13141  }
13142 
13143  m_BlockInfos.clear();
13144 
13145  PostprocessMetadata();
13146 
13147  return VK_SUCCESS;
13148 }
13149 
13150 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13151 {
13152  const size_t blockCount = m_pBlockVector->GetBlockCount();
13153  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13154  {
13155  VmaBlockMetadata_Generic* const pMetadata =
13156  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13157  pMetadata->m_FreeCount = 0;
13158  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13159  pMetadata->m_FreeSuballocationsBySize.clear();
13160  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13161  it != pMetadata->m_Suballocations.end(); )
13162  {
13163  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13164  {
13165  VmaSuballocationList::iterator nextIt = it;
13166  ++nextIt;
13167  pMetadata->m_Suballocations.erase(it);
13168  it = nextIt;
13169  }
13170  else
13171  {
13172  ++it;
13173  }
13174  }
13175  }
13176 }
13177 
13178 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13179 {
13180  const size_t blockCount = m_pBlockVector->GetBlockCount();
13181  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13182  {
13183  VmaBlockMetadata_Generic* const pMetadata =
13184  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13185  const VkDeviceSize blockSize = pMetadata->GetSize();
13186 
13187  // No allocations in this block - entire area is free.
13188  if(pMetadata->m_Suballocations.empty())
13189  {
13190  pMetadata->m_FreeCount = 1;
13191  //pMetadata->m_SumFreeSize is already set to blockSize.
13192  VmaSuballocation suballoc = {
13193  0, // offset
13194  blockSize, // size
13195  VMA_NULL, // hAllocation
13196  VMA_SUBALLOCATION_TYPE_FREE };
13197  pMetadata->m_Suballocations.push_back(suballoc);
13198  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13199  }
13200  // There are some allocations in this block.
13201  else
13202  {
13203  VkDeviceSize offset = 0;
13204  VmaSuballocationList::iterator it;
13205  for(it = pMetadata->m_Suballocations.begin();
13206  it != pMetadata->m_Suballocations.end();
13207  ++it)
13208  {
13209  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13210  VMA_ASSERT(it->offset >= offset);
13211 
13212  // Need to insert preceding free space.
13213  if(it->offset > offset)
13214  {
13215  ++pMetadata->m_FreeCount;
13216  const VkDeviceSize freeSize = it->offset - offset;
13217  VmaSuballocation suballoc = {
13218  offset, // offset
13219  freeSize, // size
13220  VMA_NULL, // hAllocation
13221  VMA_SUBALLOCATION_TYPE_FREE };
13222  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13223  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13224  {
13225  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13226  }
13227  }
13228 
13229  pMetadata->m_SumFreeSize -= it->size;
13230  offset = it->offset + it->size;
13231  }
13232 
13233  // Need to insert trailing free space.
13234  if(offset < blockSize)
13235  {
13236  ++pMetadata->m_FreeCount;
13237  const VkDeviceSize freeSize = blockSize - offset;
13238  VmaSuballocation suballoc = {
13239  offset, // offset
13240  freeSize, // size
13241  VMA_NULL, // hAllocation
13242  VMA_SUBALLOCATION_TYPE_FREE };
13243  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13244  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13245  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13246  {
13247  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13248  }
13249  }
13250 
13251  VMA_SORT(
13252  pMetadata->m_FreeSuballocationsBySize.begin(),
13253  pMetadata->m_FreeSuballocationsBySize.end(),
13254  VmaSuballocationItemSizeLess());
13255  }
13256 
13257  VMA_HEAVY_ASSERT(pMetadata->Validate());
13258  }
13259 }
13260 
13261 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13262 {
13263  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13264  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13265  while(it != pMetadata->m_Suballocations.end())
13266  {
13267  if(it->offset < suballoc.offset)
13268  {
13269  ++it;
13270  }
13271  }
13272  pMetadata->m_Suballocations.insert(it, suballoc);
13273 }
13274 
13276 // VmaBlockVectorDefragmentationContext
13277 
13278 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13279  VmaAllocator hAllocator,
13280  VmaPool hCustomPool,
13281  VmaBlockVector* pBlockVector,
13282  uint32_t currFrameIndex,
13283  uint32_t algorithmFlags) :
13284  res(VK_SUCCESS),
13285  mutexLocked(false),
13286  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13287  m_hAllocator(hAllocator),
13288  m_hCustomPool(hCustomPool),
13289  m_pBlockVector(pBlockVector),
13290  m_CurrFrameIndex(currFrameIndex),
13291  m_AlgorithmFlags(algorithmFlags),
13292  m_pAlgorithm(VMA_NULL),
13293  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13294  m_AllAllocations(false)
13295 {
13296 }
13297 
13298 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13299 {
13300  vma_delete(m_hAllocator, m_pAlgorithm);
13301 }
13302 
13303 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13304 {
13305  AllocInfo info = { hAlloc, pChanged };
13306  m_Allocations.push_back(info);
13307 }
13308 
13309 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13310 {
13311  const bool allAllocations = m_AllAllocations ||
13312  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13313 
13314  /********************************
13315  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13316  ********************************/
13317 
13318  /*
13319  Fast algorithm is supported only when certain criteria are met:
13320  - VMA_DEBUG_MARGIN is 0.
13321  - All allocations in this block vector are moveable.
13322  - There is no possibility of image/buffer granularity conflict.
13323  */
13324  if(VMA_DEBUG_MARGIN == 0 &&
13325  allAllocations &&
13326  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13327  {
13328  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13329  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13330  }
13331  else
13332  {
13333  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13334  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13335  }
13336 
13337  if(allAllocations)
13338  {
13339  m_pAlgorithm->AddAll();
13340  }
13341  else
13342  {
13343  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13344  {
13345  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13346  }
13347  }
13348 }
13349 
13351 // VmaDefragmentationContext
13352 
13353 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13354  VmaAllocator hAllocator,
13355  uint32_t currFrameIndex,
13356  uint32_t flags,
13357  VmaDefragmentationStats* pStats) :
13358  m_hAllocator(hAllocator),
13359  m_CurrFrameIndex(currFrameIndex),
13360  m_Flags(flags),
13361  m_pStats(pStats),
13362  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13363 {
13364  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13365 }
13366 
13367 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13368 {
13369  for(size_t i = m_CustomPoolContexts.size(); i--; )
13370  {
13371  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13372  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13373  vma_delete(m_hAllocator, pBlockVectorCtx);
13374  }
13375  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13376  {
13377  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13378  if(pBlockVectorCtx)
13379  {
13380  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13381  vma_delete(m_hAllocator, pBlockVectorCtx);
13382  }
13383  }
13384 }
13385 
13386 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13387 {
13388  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13389  {
13390  VmaPool pool = pPools[poolIndex];
13391  VMA_ASSERT(pool);
13392  // Pools with algorithm other than default are not defragmented.
13393  if(pool->m_BlockVector.GetAlgorithm() == 0)
13394  {
13395  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13396 
13397  for(size_t i = m_CustomPoolContexts.size(); i--; )
13398  {
13399  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13400  {
13401  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13402  break;
13403  }
13404  }
13405 
13406  if(!pBlockVectorDefragCtx)
13407  {
13408  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13409  m_hAllocator,
13410  pool,
13411  &pool->m_BlockVector,
13412  m_CurrFrameIndex,
13413  m_Flags);
13414  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13415  }
13416 
13417  pBlockVectorDefragCtx->AddAll();
13418  }
13419  }
13420 }
13421 
13422 void VmaDefragmentationContext_T::AddAllocations(
13423  uint32_t allocationCount,
13424  VmaAllocation* pAllocations,
13425  VkBool32* pAllocationsChanged)
13426 {
13427  // Dispatch pAllocations among defragmentators. Create them when necessary.
13428  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13429  {
13430  const VmaAllocation hAlloc = pAllocations[allocIndex];
13431  VMA_ASSERT(hAlloc);
13432  // DedicatedAlloc cannot be defragmented.
13433  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13434  // Lost allocation cannot be defragmented.
13435  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13436  {
13437  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13438 
13439  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13440  // This allocation belongs to custom pool.
13441  if(hAllocPool != VK_NULL_HANDLE)
13442  {
13443  // Pools with algorithm other than default are not defragmented.
13444  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13445  {
13446  for(size_t i = m_CustomPoolContexts.size(); i--; )
13447  {
13448  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13449  {
13450  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13451  break;
13452  }
13453  }
13454  if(!pBlockVectorDefragCtx)
13455  {
13456  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13457  m_hAllocator,
13458  hAllocPool,
13459  &hAllocPool->m_BlockVector,
13460  m_CurrFrameIndex,
13461  m_Flags);
13462  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13463  }
13464  }
13465  }
13466  // This allocation belongs to default pool.
13467  else
13468  {
13469  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13470  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13471  if(!pBlockVectorDefragCtx)
13472  {
13473  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13474  m_hAllocator,
13475  VMA_NULL, // hCustomPool
13476  m_hAllocator->m_pBlockVectors[memTypeIndex],
13477  m_CurrFrameIndex,
13478  m_Flags);
13479  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13480  }
13481  }
13482 
13483  if(pBlockVectorDefragCtx)
13484  {
13485  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13486  &pAllocationsChanged[allocIndex] : VMA_NULL;
13487  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13488  }
13489  }
13490  }
13491 }
13492 
13493 VkResult VmaDefragmentationContext_T::Defragment(
13494  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13495  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13496  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13497 {
13498  if(pStats)
13499  {
13500  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13501  }
13502 
13503  if(commandBuffer == VK_NULL_HANDLE)
13504  {
13505  maxGpuBytesToMove = 0;
13506  maxGpuAllocationsToMove = 0;
13507  }
13508 
13509  VkResult res = VK_SUCCESS;
13510 
13511  // Process default pools.
13512  for(uint32_t memTypeIndex = 0;
13513  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13514  ++memTypeIndex)
13515  {
13516  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13517  if(pBlockVectorCtx)
13518  {
13519  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13520  pBlockVectorCtx->GetBlockVector()->Defragment(
13521  pBlockVectorCtx,
13522  pStats,
13523  maxCpuBytesToMove, maxCpuAllocationsToMove,
13524  maxGpuBytesToMove, maxGpuAllocationsToMove,
13525  commandBuffer);
13526  if(pBlockVectorCtx->res != VK_SUCCESS)
13527  {
13528  res = pBlockVectorCtx->res;
13529  }
13530  }
13531  }
13532 
13533  // Process custom pools.
13534  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13535  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13536  ++customCtxIndex)
13537  {
13538  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13539  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13540  pBlockVectorCtx->GetBlockVector()->Defragment(
13541  pBlockVectorCtx,
13542  pStats,
13543  maxCpuBytesToMove, maxCpuAllocationsToMove,
13544  maxGpuBytesToMove, maxGpuAllocationsToMove,
13545  commandBuffer);
13546  if(pBlockVectorCtx->res != VK_SUCCESS)
13547  {
13548  res = pBlockVectorCtx->res;
13549  }
13550  }
13551 
13552  return res;
13553 }
13554 
13556 // VmaRecorder
13557 
13558 #if VMA_RECORDING_ENABLED
13559 
13560 VmaRecorder::VmaRecorder() :
13561  m_UseMutex(true),
13562  m_Flags(0),
13563  m_File(VMA_NULL),
13564  m_Freq(INT64_MAX),
13565  m_StartCounter(INT64_MAX)
13566 {
13567 }
13568 
13569 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13570 {
13571  m_UseMutex = useMutex;
13572  m_Flags = settings.flags;
13573 
13574  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13575  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13576 
13577  // Open file for writing.
13578  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13579  if(err != 0)
13580  {
13581  return VK_ERROR_INITIALIZATION_FAILED;
13582  }
13583 
13584  // Write header.
13585  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13586  fprintf(m_File, "%s\n", "1,5");
13587 
13588  return VK_SUCCESS;
13589 }
13590 
13591 VmaRecorder::~VmaRecorder()
13592 {
13593  if(m_File != VMA_NULL)
13594  {
13595  fclose(m_File);
13596  }
13597 }
13598 
13599 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13600 {
13601  CallParams callParams;
13602  GetBasicParams(callParams);
13603 
13604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13605  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13606  Flush();
13607 }
13608 
13609 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13610 {
13611  CallParams callParams;
13612  GetBasicParams(callParams);
13613 
13614  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13615  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13616  Flush();
13617 }
13618 
13619 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13620 {
13621  CallParams callParams;
13622  GetBasicParams(callParams);
13623 
13624  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13625  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13626  createInfo.memoryTypeIndex,
13627  createInfo.flags,
13628  createInfo.blockSize,
13629  (uint64_t)createInfo.minBlockCount,
13630  (uint64_t)createInfo.maxBlockCount,
13631  createInfo.frameInUseCount,
13632  pool);
13633  Flush();
13634 }
13635 
13636 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13637 {
13638  CallParams callParams;
13639  GetBasicParams(callParams);
13640 
13641  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13642  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13643  pool);
13644  Flush();
13645 }
13646 
13647 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13648  const VkMemoryRequirements& vkMemReq,
13649  const VmaAllocationCreateInfo& createInfo,
13650  VmaAllocation allocation)
13651 {
13652  CallParams callParams;
13653  GetBasicParams(callParams);
13654 
13655  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13656  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13657  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13658  vkMemReq.size,
13659  vkMemReq.alignment,
13660  vkMemReq.memoryTypeBits,
13661  createInfo.flags,
13662  createInfo.usage,
13663  createInfo.requiredFlags,
13664  createInfo.preferredFlags,
13665  createInfo.memoryTypeBits,
13666  createInfo.pool,
13667  allocation,
13668  userDataStr.GetString());
13669  Flush();
13670 }
13671 
13672 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13673  const VkMemoryRequirements& vkMemReq,
13674  const VmaAllocationCreateInfo& createInfo,
13675  uint64_t allocationCount,
13676  const VmaAllocation* pAllocations)
13677 {
13678  CallParams callParams;
13679  GetBasicParams(callParams);
13680 
13681  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13682  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13683  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13684  vkMemReq.size,
13685  vkMemReq.alignment,
13686  vkMemReq.memoryTypeBits,
13687  createInfo.flags,
13688  createInfo.usage,
13689  createInfo.requiredFlags,
13690  createInfo.preferredFlags,
13691  createInfo.memoryTypeBits,
13692  createInfo.pool);
13693  PrintPointerList(allocationCount, pAllocations);
13694  fprintf(m_File, ",%s\n", userDataStr.GetString());
13695  Flush();
13696 }
13697 
13698 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13699  const VkMemoryRequirements& vkMemReq,
13700  bool requiresDedicatedAllocation,
13701  bool prefersDedicatedAllocation,
13702  const VmaAllocationCreateInfo& createInfo,
13703  VmaAllocation allocation)
13704 {
13705  CallParams callParams;
13706  GetBasicParams(callParams);
13707 
13708  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13709  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13710  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13711  vkMemReq.size,
13712  vkMemReq.alignment,
13713  vkMemReq.memoryTypeBits,
13714  requiresDedicatedAllocation ? 1 : 0,
13715  prefersDedicatedAllocation ? 1 : 0,
13716  createInfo.flags,
13717  createInfo.usage,
13718  createInfo.requiredFlags,
13719  createInfo.preferredFlags,
13720  createInfo.memoryTypeBits,
13721  createInfo.pool,
13722  allocation,
13723  userDataStr.GetString());
13724  Flush();
13725 }
13726 
13727 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13728  const VkMemoryRequirements& vkMemReq,
13729  bool requiresDedicatedAllocation,
13730  bool prefersDedicatedAllocation,
13731  const VmaAllocationCreateInfo& createInfo,
13732  VmaAllocation allocation)
13733 {
13734  CallParams callParams;
13735  GetBasicParams(callParams);
13736 
13737  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13738  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13739  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13740  vkMemReq.size,
13741  vkMemReq.alignment,
13742  vkMemReq.memoryTypeBits,
13743  requiresDedicatedAllocation ? 1 : 0,
13744  prefersDedicatedAllocation ? 1 : 0,
13745  createInfo.flags,
13746  createInfo.usage,
13747  createInfo.requiredFlags,
13748  createInfo.preferredFlags,
13749  createInfo.memoryTypeBits,
13750  createInfo.pool,
13751  allocation,
13752  userDataStr.GetString());
13753  Flush();
13754 }
13755 
13756 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13757  VmaAllocation allocation)
13758 {
13759  CallParams callParams;
13760  GetBasicParams(callParams);
13761 
13762  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13763  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13764  allocation);
13765  Flush();
13766 }
13767 
13768 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13769  uint64_t allocationCount,
13770  const VmaAllocation* pAllocations)
13771 {
13772  CallParams callParams;
13773  GetBasicParams(callParams);
13774 
13775  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13776  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13777  PrintPointerList(allocationCount, pAllocations);
13778  fprintf(m_File, "\n");
13779  Flush();
13780 }
13781 
13782 void VmaRecorder::RecordResizeAllocation(
13783  uint32_t frameIndex,
13784  VmaAllocation allocation,
13785  VkDeviceSize newSize)
13786 {
13787  CallParams callParams;
13788  GetBasicParams(callParams);
13789 
13790  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13791  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13792  allocation, newSize);
13793  Flush();
13794 }
13795 
13796 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13797  VmaAllocation allocation,
13798  const void* pUserData)
13799 {
13800  CallParams callParams;
13801  GetBasicParams(callParams);
13802 
13803  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13804  UserDataString userDataStr(
13805  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13806  pUserData);
13807  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13808  allocation,
13809  userDataStr.GetString());
13810  Flush();
13811 }
13812 
13813 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13814  VmaAllocation allocation)
13815 {
13816  CallParams callParams;
13817  GetBasicParams(callParams);
13818 
13819  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13820  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13821  allocation);
13822  Flush();
13823 }
13824 
13825 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13826  VmaAllocation allocation)
13827 {
13828  CallParams callParams;
13829  GetBasicParams(callParams);
13830 
13831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13832  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13833  allocation);
13834  Flush();
13835 }
13836 
13837 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13838  VmaAllocation allocation)
13839 {
13840  CallParams callParams;
13841  GetBasicParams(callParams);
13842 
13843  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13844  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13845  allocation);
13846  Flush();
13847 }
13848 
13849 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13850  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13851 {
13852  CallParams callParams;
13853  GetBasicParams(callParams);
13854 
13855  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13856  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13857  allocation,
13858  offset,
13859  size);
13860  Flush();
13861 }
13862 
13863 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13864  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13865 {
13866  CallParams callParams;
13867  GetBasicParams(callParams);
13868 
13869  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13870  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13871  allocation,
13872  offset,
13873  size);
13874  Flush();
13875 }
13876 
13877 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13878  const VkBufferCreateInfo& bufCreateInfo,
13879  const VmaAllocationCreateInfo& allocCreateInfo,
13880  VmaAllocation allocation)
13881 {
13882  CallParams callParams;
13883  GetBasicParams(callParams);
13884 
13885  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13886  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13887  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13888  bufCreateInfo.flags,
13889  bufCreateInfo.size,
13890  bufCreateInfo.usage,
13891  bufCreateInfo.sharingMode,
13892  allocCreateInfo.flags,
13893  allocCreateInfo.usage,
13894  allocCreateInfo.requiredFlags,
13895  allocCreateInfo.preferredFlags,
13896  allocCreateInfo.memoryTypeBits,
13897  allocCreateInfo.pool,
13898  allocation,
13899  userDataStr.GetString());
13900  Flush();
13901 }
13902 
13903 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13904  const VkImageCreateInfo& imageCreateInfo,
13905  const VmaAllocationCreateInfo& allocCreateInfo,
13906  VmaAllocation allocation)
13907 {
13908  CallParams callParams;
13909  GetBasicParams(callParams);
13910 
13911  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13912  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13913  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13914  imageCreateInfo.flags,
13915  imageCreateInfo.imageType,
13916  imageCreateInfo.format,
13917  imageCreateInfo.extent.width,
13918  imageCreateInfo.extent.height,
13919  imageCreateInfo.extent.depth,
13920  imageCreateInfo.mipLevels,
13921  imageCreateInfo.arrayLayers,
13922  imageCreateInfo.samples,
13923  imageCreateInfo.tiling,
13924  imageCreateInfo.usage,
13925  imageCreateInfo.sharingMode,
13926  imageCreateInfo.initialLayout,
13927  allocCreateInfo.flags,
13928  allocCreateInfo.usage,
13929  allocCreateInfo.requiredFlags,
13930  allocCreateInfo.preferredFlags,
13931  allocCreateInfo.memoryTypeBits,
13932  allocCreateInfo.pool,
13933  allocation,
13934  userDataStr.GetString());
13935  Flush();
13936 }
13937 
13938 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13939  VmaAllocation allocation)
13940 {
13941  CallParams callParams;
13942  GetBasicParams(callParams);
13943 
13944  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13945  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13946  allocation);
13947  Flush();
13948 }
13949 
13950 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13951  VmaAllocation allocation)
13952 {
13953  CallParams callParams;
13954  GetBasicParams(callParams);
13955 
13956  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13957  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13958  allocation);
13959  Flush();
13960 }
13961 
13962 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13963  VmaAllocation allocation)
13964 {
13965  CallParams callParams;
13966  GetBasicParams(callParams);
13967 
13968  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13969  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13970  allocation);
13971  Flush();
13972 }
13973 
13974 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13975  VmaAllocation allocation)
13976 {
13977  CallParams callParams;
13978  GetBasicParams(callParams);
13979 
13980  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13981  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13982  allocation);
13983  Flush();
13984 }
13985 
13986 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13987  VmaPool pool)
13988 {
13989  CallParams callParams;
13990  GetBasicParams(callParams);
13991 
13992  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13993  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13994  pool);
13995  Flush();
13996 }
13997 
13998 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13999  const VmaDefragmentationInfo2& info,
14001 {
14002  CallParams callParams;
14003  GetBasicParams(callParams);
14004 
14005  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14006  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14007  info.flags);
14008  PrintPointerList(info.allocationCount, info.pAllocations);
14009  fprintf(m_File, ",");
14010  PrintPointerList(info.poolCount, info.pPools);
14011  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14012  info.maxCpuBytesToMove,
14014  info.maxGpuBytesToMove,
14016  info.commandBuffer,
14017  ctx);
14018  Flush();
14019 }
14020 
14021 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14023 {
14024  CallParams callParams;
14025  GetBasicParams(callParams);
14026 
14027  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14028  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14029  ctx);
14030  Flush();
14031 }
14032 
14033 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14034 {
14035  if(pUserData != VMA_NULL)
14036  {
14037  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14038  {
14039  m_Str = (const char*)pUserData;
14040  }
14041  else
14042  {
14043  sprintf_s(m_PtrStr, "%p", pUserData);
14044  m_Str = m_PtrStr;
14045  }
14046  }
14047  else
14048  {
14049  m_Str = "";
14050  }
14051 }
14052 
14053 void VmaRecorder::WriteConfiguration(
14054  const VkPhysicalDeviceProperties& devProps,
14055  const VkPhysicalDeviceMemoryProperties& memProps,
14056  bool dedicatedAllocationExtensionEnabled)
14057 {
14058  fprintf(m_File, "Config,Begin\n");
14059 
14060  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14061  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14062  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14063  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14064  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14065  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14066 
14067  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14068  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14069  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14070 
14071  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14072  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14073  {
14074  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14075  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14076  }
14077  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14078  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14079  {
14080  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14081  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14082  }
14083 
14084  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14085 
14086  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14087  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14088  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14089  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14090  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14091  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14092  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14093  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14094  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14095 
14096  fprintf(m_File, "Config,End\n");
14097 }
14098 
14099 void VmaRecorder::GetBasicParams(CallParams& outParams)
14100 {
14101  outParams.threadId = GetCurrentThreadId();
14102 
14103  LARGE_INTEGER counter;
14104  QueryPerformanceCounter(&counter);
14105  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14106 }
14107 
14108 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14109 {
14110  if(count)
14111  {
14112  fprintf(m_File, "%p", pItems[0]);
14113  for(uint64_t i = 1; i < count; ++i)
14114  {
14115  fprintf(m_File, " %p", pItems[i]);
14116  }
14117  }
14118 }
14119 
14120 void VmaRecorder::Flush()
14121 {
14122  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14123  {
14124  fflush(m_File);
14125  }
14126 }
14127 
14128 #endif // #if VMA_RECORDING_ENABLED
14129 
14131 // VmaAllocationObjectAllocator
14132 
14133 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14134  m_Allocator(pAllocationCallbacks, 1024)
14135 {
14136 }
14137 
14138 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14139 {
14140  VmaMutexLock mutexLock(m_Mutex);
14141  return m_Allocator.Alloc();
14142 }
14143 
14144 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14145 {
14146  VmaMutexLock mutexLock(m_Mutex);
14147  m_Allocator.Free(hAlloc);
14148 }
14149 
14151 // VmaAllocator_T
14152 
14153 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14154  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14155  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14156  m_hDevice(pCreateInfo->device),
14157  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14158  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14159  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14160  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14161  m_PreferredLargeHeapBlockSize(0),
14162  m_PhysicalDevice(pCreateInfo->physicalDevice),
14163  m_CurrentFrameIndex(0),
14164  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14165  m_NextPoolId(0)
14167  ,m_pRecorder(VMA_NULL)
14168 #endif
14169 {
14170  if(VMA_DEBUG_DETECT_CORRUPTION)
14171  {
14172  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14173  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14174  }
14175 
14176  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14177 
14178 #if !(VMA_DEDICATED_ALLOCATION)
14180  {
14181  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14182  }
14183 #endif
14184 
14185  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14186  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14187  memset(&m_MemProps, 0, sizeof(m_MemProps));
14188 
14189  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14190  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14191 
14192  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14193  {
14194  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14195  }
14196 
14197  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14198  {
14199  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14200  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14201  }
14202 
14203  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14204 
14205  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14206  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14207 
14208  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14209  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14210  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14211  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14212 
14213  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14214  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14215 
14216  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14217  {
14218  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14219  {
14220  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14221  if(limit != VK_WHOLE_SIZE)
14222  {
14223  m_HeapSizeLimit[heapIndex] = limit;
14224  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14225  {
14226  m_MemProps.memoryHeaps[heapIndex].size = limit;
14227  }
14228  }
14229  }
14230  }
14231 
14232  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14233  {
14234  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14235 
14236  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14237  this,
14238  VK_NULL_HANDLE, // hParentPool
14239  memTypeIndex,
14240  preferredBlockSize,
14241  0,
14242  SIZE_MAX,
14243  GetBufferImageGranularity(),
14244  pCreateInfo->frameInUseCount,
14245  false, // isCustomPool
14246  false, // explicitBlockSize
14247  false); // linearAlgorithm
14248  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14249  // becase minBlockCount is 0.
14250  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14251 
14252  }
14253 }
14254 
14255 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14256 {
14257  VkResult res = VK_SUCCESS;
14258 
14259  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14260  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14261  {
14262 #if VMA_RECORDING_ENABLED
14263  m_pRecorder = vma_new(this, VmaRecorder)();
14264  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14265  if(res != VK_SUCCESS)
14266  {
14267  return res;
14268  }
14269  m_pRecorder->WriteConfiguration(
14270  m_PhysicalDeviceProperties,
14271  m_MemProps,
14272  m_UseKhrDedicatedAllocation);
14273  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14274 #else
14275  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14276  return VK_ERROR_FEATURE_NOT_PRESENT;
14277 #endif
14278  }
14279 
14280  return res;
14281 }
14282 
14283 VmaAllocator_T::~VmaAllocator_T()
14284 {
14285 #if VMA_RECORDING_ENABLED
14286  if(m_pRecorder != VMA_NULL)
14287  {
14288  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14289  vma_delete(this, m_pRecorder);
14290  }
14291 #endif
14292 
14293  VMA_ASSERT(m_Pools.empty());
14294 
14295  for(size_t i = GetMemoryTypeCount(); i--; )
14296  {
14297  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14298  {
14299  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14300  }
14301 
14302  vma_delete(this, m_pDedicatedAllocations[i]);
14303  vma_delete(this, m_pBlockVectors[i]);
14304  }
14305 }
14306 
14307 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14308 {
14309 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14310  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14311  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14312  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14313  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14314  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14315  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14316  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14317  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14318  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14319  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14320  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14321  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14322  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14323  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14324  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14325  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14326  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14327 #if VMA_DEDICATED_ALLOCATION
14328  if(m_UseKhrDedicatedAllocation)
14329  {
14330  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14331  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14332  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14333  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14334  }
14335 #endif // #if VMA_DEDICATED_ALLOCATION
14336 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14337 
14338 #define VMA_COPY_IF_NOT_NULL(funcName) \
14339  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14340 
14341  if(pVulkanFunctions != VMA_NULL)
14342  {
14343  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14344  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14345  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14346  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14347  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14348  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14349  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14350  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14351  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14352  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14353  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14354  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14355  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14356  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14357  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14358  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14359  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14360 #if VMA_DEDICATED_ALLOCATION
14361  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14362  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14363 #endif
14364  }
14365 
14366 #undef VMA_COPY_IF_NOT_NULL
14367 
14368  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14369  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14370  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14371  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14372  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14373  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14374  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14375  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14376  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14377  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14378  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14379  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14380  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14381  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14382  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14383  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14384  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14385  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14386  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14387 #if VMA_DEDICATED_ALLOCATION
14388  if(m_UseKhrDedicatedAllocation)
14389  {
14390  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14391  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14392  }
14393 #endif
14394 }
14395 
14396 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14397 {
14398  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14399  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14400  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14401  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14402 }
14403 
14404 VkResult VmaAllocator_T::AllocateMemoryOfType(
14405  VkDeviceSize size,
14406  VkDeviceSize alignment,
14407  bool dedicatedAllocation,
14408  VkBuffer dedicatedBuffer,
14409  VkImage dedicatedImage,
14410  const VmaAllocationCreateInfo& createInfo,
14411  uint32_t memTypeIndex,
14412  VmaSuballocationType suballocType,
14413  size_t allocationCount,
14414  VmaAllocation* pAllocations)
14415 {
14416  VMA_ASSERT(pAllocations != VMA_NULL);
14417  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14418 
14419  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14420 
14421  // If memory type is not HOST_VISIBLE, disable MAPPED.
14422  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14423  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14424  {
14425  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14426  }
14427 
14428  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14429  VMA_ASSERT(blockVector);
14430 
14431  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14432  bool preferDedicatedMemory =
14433  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14434  dedicatedAllocation ||
14435  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14436  size > preferredBlockSize / 2;
14437 
14438  if(preferDedicatedMemory &&
14439  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14440  finalCreateInfo.pool == VK_NULL_HANDLE)
14441  {
14443  }
14444 
14445  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14446  {
14447  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14448  {
14449  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14450  }
14451  else
14452  {
14453  return AllocateDedicatedMemory(
14454  size,
14455  suballocType,
14456  memTypeIndex,
14457  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14458  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14459  finalCreateInfo.pUserData,
14460  dedicatedBuffer,
14461  dedicatedImage,
14462  allocationCount,
14463  pAllocations);
14464  }
14465  }
14466  else
14467  {
14468  VkResult res = blockVector->Allocate(
14469  m_CurrentFrameIndex.load(),
14470  size,
14471  alignment,
14472  finalCreateInfo,
14473  suballocType,
14474  allocationCount,
14475  pAllocations);
14476  if(res == VK_SUCCESS)
14477  {
14478  return res;
14479  }
14480 
14481  // 5. Try dedicated memory.
14482  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14483  {
14484  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14485  }
14486  else
14487  {
14488  res = AllocateDedicatedMemory(
14489  size,
14490  suballocType,
14491  memTypeIndex,
14492  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14493  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14494  finalCreateInfo.pUserData,
14495  dedicatedBuffer,
14496  dedicatedImage,
14497  allocationCount,
14498  pAllocations);
14499  if(res == VK_SUCCESS)
14500  {
14501  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14502  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14503  return VK_SUCCESS;
14504  }
14505  else
14506  {
14507  // Everything failed: Return error code.
14508  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14509  return res;
14510  }
14511  }
14512  }
14513 }
14514 
14515 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14516  VkDeviceSize size,
14517  VmaSuballocationType suballocType,
14518  uint32_t memTypeIndex,
14519  bool map,
14520  bool isUserDataString,
14521  void* pUserData,
14522  VkBuffer dedicatedBuffer,
14523  VkImage dedicatedImage,
14524  size_t allocationCount,
14525  VmaAllocation* pAllocations)
14526 {
14527  VMA_ASSERT(allocationCount > 0 && pAllocations);
14528 
14529  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14530  allocInfo.memoryTypeIndex = memTypeIndex;
14531  allocInfo.allocationSize = size;
14532 
14533 #if VMA_DEDICATED_ALLOCATION
14534  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14535  if(m_UseKhrDedicatedAllocation)
14536  {
14537  if(dedicatedBuffer != VK_NULL_HANDLE)
14538  {
14539  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14540  dedicatedAllocInfo.buffer = dedicatedBuffer;
14541  allocInfo.pNext = &dedicatedAllocInfo;
14542  }
14543  else if(dedicatedImage != VK_NULL_HANDLE)
14544  {
14545  dedicatedAllocInfo.image = dedicatedImage;
14546  allocInfo.pNext = &dedicatedAllocInfo;
14547  }
14548  }
14549 #endif // #if VMA_DEDICATED_ALLOCATION
14550 
14551  size_t allocIndex;
14552  VkResult res = VK_SUCCESS;
14553  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14554  {
14555  res = AllocateDedicatedMemoryPage(
14556  size,
14557  suballocType,
14558  memTypeIndex,
14559  allocInfo,
14560  map,
14561  isUserDataString,
14562  pUserData,
14563  pAllocations + allocIndex);
14564  if(res != VK_SUCCESS)
14565  {
14566  break;
14567  }
14568  }
14569 
14570  if(res == VK_SUCCESS)
14571  {
14572  // Register them in m_pDedicatedAllocations.
14573  {
14574  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14575  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14576  VMA_ASSERT(pDedicatedAllocations);
14577  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14578  {
14579  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14580  }
14581  }
14582 
14583  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14584  }
14585  else
14586  {
14587  // Free all already created allocations.
14588  while(allocIndex--)
14589  {
14590  VmaAllocation currAlloc = pAllocations[allocIndex];
14591  VkDeviceMemory hMemory = currAlloc->GetMemory();
14592 
14593  /*
14594  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
14595  before vkFreeMemory.
14596 
14597  if(currAlloc->GetMappedData() != VMA_NULL)
14598  {
14599  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14600  }
14601  */
14602 
14603  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14604 
14605  currAlloc->SetUserData(this, VMA_NULL);
14606  currAlloc->Dtor();
14607  m_AllocationObjectAllocator.Free(currAlloc);
14608  }
14609 
14610  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14611  }
14612 
14613  return res;
14614 }
14615 
14616 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14617  VkDeviceSize size,
14618  VmaSuballocationType suballocType,
14619  uint32_t memTypeIndex,
14620  const VkMemoryAllocateInfo& allocInfo,
14621  bool map,
14622  bool isUserDataString,
14623  void* pUserData,
14624  VmaAllocation* pAllocation)
14625 {
14626  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14627  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14628  if(res < 0)
14629  {
14630  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14631  return res;
14632  }
14633 
14634  void* pMappedData = VMA_NULL;
14635  if(map)
14636  {
14637  res = (*m_VulkanFunctions.vkMapMemory)(
14638  m_hDevice,
14639  hMemory,
14640  0,
14641  VK_WHOLE_SIZE,
14642  0,
14643  &pMappedData);
14644  if(res < 0)
14645  {
14646  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14647  FreeVulkanMemory(memTypeIndex, size, hMemory);
14648  return res;
14649  }
14650  }
14651 
14652  *pAllocation = m_AllocationObjectAllocator.Allocate();
14653  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14654  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14655  (*pAllocation)->SetUserData(this, pUserData);
14656  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14657  {
14658  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14659  }
14660 
14661  return VK_SUCCESS;
14662 }
14663 
14664 void VmaAllocator_T::GetBufferMemoryRequirements(
14665  VkBuffer hBuffer,
14666  VkMemoryRequirements& memReq,
14667  bool& requiresDedicatedAllocation,
14668  bool& prefersDedicatedAllocation) const
14669 {
14670 #if VMA_DEDICATED_ALLOCATION
14671  if(m_UseKhrDedicatedAllocation)
14672  {
14673  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14674  memReqInfo.buffer = hBuffer;
14675 
14676  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14677 
14678  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14679  memReq2.pNext = &memDedicatedReq;
14680 
14681  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14682 
14683  memReq = memReq2.memoryRequirements;
14684  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14685  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14686  }
14687  else
14688 #endif // #if VMA_DEDICATED_ALLOCATION
14689  {
14690  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14691  requiresDedicatedAllocation = false;
14692  prefersDedicatedAllocation = false;
14693  }
14694 }
14695 
14696 void VmaAllocator_T::GetImageMemoryRequirements(
14697  VkImage hImage,
14698  VkMemoryRequirements& memReq,
14699  bool& requiresDedicatedAllocation,
14700  bool& prefersDedicatedAllocation) const
14701 {
14702 #if VMA_DEDICATED_ALLOCATION
14703  if(m_UseKhrDedicatedAllocation)
14704  {
14705  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14706  memReqInfo.image = hImage;
14707 
14708  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14709 
14710  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14711  memReq2.pNext = &memDedicatedReq;
14712 
14713  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14714 
14715  memReq = memReq2.memoryRequirements;
14716  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14717  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14718  }
14719  else
14720 #endif // #if VMA_DEDICATED_ALLOCATION
14721  {
14722  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14723  requiresDedicatedAllocation = false;
14724  prefersDedicatedAllocation = false;
14725  }
14726 }
14727 
14728 VkResult VmaAllocator_T::AllocateMemory(
14729  const VkMemoryRequirements& vkMemReq,
14730  bool requiresDedicatedAllocation,
14731  bool prefersDedicatedAllocation,
14732  VkBuffer dedicatedBuffer,
14733  VkImage dedicatedImage,
14734  const VmaAllocationCreateInfo& createInfo,
14735  VmaSuballocationType suballocType,
14736  size_t allocationCount,
14737  VmaAllocation* pAllocations)
14738 {
14739  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14740 
14741  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14742 
14743  if(vkMemReq.size == 0)
14744  {
14745  return VK_ERROR_VALIDATION_FAILED_EXT;
14746  }
14747  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14748  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14749  {
14750  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14751  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14752  }
14753  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14755  {
14756  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14757  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14758  }
14759  if(requiresDedicatedAllocation)
14760  {
14761  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14762  {
14763  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14764  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14765  }
14766  if(createInfo.pool != VK_NULL_HANDLE)
14767  {
14768  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14769  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14770  }
14771  }
14772  if((createInfo.pool != VK_NULL_HANDLE) &&
14773  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14774  {
14775  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14776  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14777  }
14778 
14779  if(createInfo.pool != VK_NULL_HANDLE)
14780  {
14781  const VkDeviceSize alignmentForPool = VMA_MAX(
14782  vkMemReq.alignment,
14783  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14784  return createInfo.pool->m_BlockVector.Allocate(
14785  m_CurrentFrameIndex.load(),
14786  vkMemReq.size,
14787  alignmentForPool,
14788  createInfo,
14789  suballocType,
14790  allocationCount,
14791  pAllocations);
14792  }
14793  else
14794  {
14795  // Bit mask of memory Vulkan types acceptable for this allocation.
14796  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14797  uint32_t memTypeIndex = UINT32_MAX;
14798  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14799  if(res == VK_SUCCESS)
14800  {
14801  VkDeviceSize alignmentForMemType = VMA_MAX(
14802  vkMemReq.alignment,
14803  GetMemoryTypeMinAlignment(memTypeIndex));
14804 
14805  res = AllocateMemoryOfType(
14806  vkMemReq.size,
14807  alignmentForMemType,
14808  requiresDedicatedAllocation || prefersDedicatedAllocation,
14809  dedicatedBuffer,
14810  dedicatedImage,
14811  createInfo,
14812  memTypeIndex,
14813  suballocType,
14814  allocationCount,
14815  pAllocations);
14816  // Succeeded on first try.
14817  if(res == VK_SUCCESS)
14818  {
14819  return res;
14820  }
14821  // Allocation from this memory type failed. Try other compatible memory types.
14822  else
14823  {
14824  for(;;)
14825  {
14826  // Remove old memTypeIndex from list of possibilities.
14827  memoryTypeBits &= ~(1u << memTypeIndex);
14828  // Find alternative memTypeIndex.
14829  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14830  if(res == VK_SUCCESS)
14831  {
14832  alignmentForMemType = VMA_MAX(
14833  vkMemReq.alignment,
14834  GetMemoryTypeMinAlignment(memTypeIndex));
14835 
14836  res = AllocateMemoryOfType(
14837  vkMemReq.size,
14838  alignmentForMemType,
14839  requiresDedicatedAllocation || prefersDedicatedAllocation,
14840  dedicatedBuffer,
14841  dedicatedImage,
14842  createInfo,
14843  memTypeIndex,
14844  suballocType,
14845  allocationCount,
14846  pAllocations);
14847  // Allocation from this alternative memory type succeeded.
14848  if(res == VK_SUCCESS)
14849  {
14850  return res;
14851  }
14852  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14853  }
14854  // No other matching memory type index could be found.
14855  else
14856  {
14857  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14858  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14859  }
14860  }
14861  }
14862  }
14863  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14864  else
14865  return res;
14866  }
14867 }
14868 
14869 void VmaAllocator_T::FreeMemory(
14870  size_t allocationCount,
14871  const VmaAllocation* pAllocations)
14872 {
14873  VMA_ASSERT(pAllocations);
14874 
14875  for(size_t allocIndex = allocationCount; allocIndex--; )
14876  {
14877  VmaAllocation allocation = pAllocations[allocIndex];
14878 
14879  if(allocation != VK_NULL_HANDLE)
14880  {
14881  if(TouchAllocation(allocation))
14882  {
14883  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14884  {
14885  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14886  }
14887 
14888  switch(allocation->GetType())
14889  {
14890  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14891  {
14892  VmaBlockVector* pBlockVector = VMA_NULL;
14893  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14894  if(hPool != VK_NULL_HANDLE)
14895  {
14896  pBlockVector = &hPool->m_BlockVector;
14897  }
14898  else
14899  {
14900  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14901  pBlockVector = m_pBlockVectors[memTypeIndex];
14902  }
14903  pBlockVector->Free(allocation);
14904  }
14905  break;
14906  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14907  FreeDedicatedMemory(allocation);
14908  break;
14909  default:
14910  VMA_ASSERT(0);
14911  }
14912  }
14913 
14914  allocation->SetUserData(this, VMA_NULL);
14915  allocation->Dtor();
14916  m_AllocationObjectAllocator.Free(allocation);
14917  }
14918  }
14919 }
14920 
14921 VkResult VmaAllocator_T::ResizeAllocation(
14922  const VmaAllocation alloc,
14923  VkDeviceSize newSize)
14924 {
14925  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14926  {
14927  return VK_ERROR_VALIDATION_FAILED_EXT;
14928  }
14929  if(newSize == alloc->GetSize())
14930  {
14931  return VK_SUCCESS;
14932  }
14933 
14934  switch(alloc->GetType())
14935  {
14936  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14937  return VK_ERROR_FEATURE_NOT_PRESENT;
14938  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14939  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14940  {
14941  alloc->ChangeSize(newSize);
14942  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14943  return VK_SUCCESS;
14944  }
14945  else
14946  {
14947  return VK_ERROR_OUT_OF_POOL_MEMORY;
14948  }
14949  default:
14950  VMA_ASSERT(0);
14951  return VK_ERROR_VALIDATION_FAILED_EXT;
14952  }
14953 }
14954 
14955 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14956 {
14957  // Initialize.
14958  InitStatInfo(pStats->total);
14959  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14960  InitStatInfo(pStats->memoryType[i]);
14961  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14962  InitStatInfo(pStats->memoryHeap[i]);
14963 
14964  // Process default pools.
14965  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14966  {
14967  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14968  VMA_ASSERT(pBlockVector);
14969  pBlockVector->AddStats(pStats);
14970  }
14971 
14972  // Process custom pools.
14973  {
14974  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14975  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14976  {
14977  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14978  }
14979  }
14980 
14981  // Process dedicated allocations.
14982  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14983  {
14984  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14985  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14986  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14987  VMA_ASSERT(pDedicatedAllocVector);
14988  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14989  {
14990  VmaStatInfo allocationStatInfo;
14991  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14992  VmaAddStatInfo(pStats->total, allocationStatInfo);
14993  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14994  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14995  }
14996  }
14997 
14998  // Postprocess.
14999  VmaPostprocessCalcStatInfo(pStats->total);
15000  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15001  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15002  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15003  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15004 }
15005 
15006 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15007 
15008 VkResult VmaAllocator_T::DefragmentationBegin(
15009  const VmaDefragmentationInfo2& info,
15010  VmaDefragmentationStats* pStats,
15011  VmaDefragmentationContext* pContext)
15012 {
15013  if(info.pAllocationsChanged != VMA_NULL)
15014  {
15015  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15016  }
15017 
15018  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15019  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15020 
15021  (*pContext)->AddPools(info.poolCount, info.pPools);
15022  (*pContext)->AddAllocations(
15024 
15025  VkResult res = (*pContext)->Defragment(
15028  info.commandBuffer, pStats);
15029 
15030  if(res != VK_NOT_READY)
15031  {
15032  vma_delete(this, *pContext);
15033  *pContext = VMA_NULL;
15034  }
15035 
15036  return res;
15037 }
15038 
15039 VkResult VmaAllocator_T::DefragmentationEnd(
15040  VmaDefragmentationContext context)
15041 {
15042  vma_delete(this, context);
15043  return VK_SUCCESS;
15044 }
15045 
15046 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15047 {
15048  if(hAllocation->CanBecomeLost())
15049  {
15050  /*
15051  Warning: This is a carefully designed algorithm.
15052  Do not modify unless you really know what you're doing :)
15053  */
15054  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15055  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15056  for(;;)
15057  {
15058  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15059  {
15060  pAllocationInfo->memoryType = UINT32_MAX;
15061  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15062  pAllocationInfo->offset = 0;
15063  pAllocationInfo->size = hAllocation->GetSize();
15064  pAllocationInfo->pMappedData = VMA_NULL;
15065  pAllocationInfo->pUserData = hAllocation->GetUserData();
15066  return;
15067  }
15068  else if(localLastUseFrameIndex == localCurrFrameIndex)
15069  {
15070  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15071  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15072  pAllocationInfo->offset = hAllocation->GetOffset();
15073  pAllocationInfo->size = hAllocation->GetSize();
15074  pAllocationInfo->pMappedData = VMA_NULL;
15075  pAllocationInfo->pUserData = hAllocation->GetUserData();
15076  return;
15077  }
15078  else // Last use time earlier than current time.
15079  {
15080  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15081  {
15082  localLastUseFrameIndex = localCurrFrameIndex;
15083  }
15084  }
15085  }
15086  }
15087  else
15088  {
15089 #if VMA_STATS_STRING_ENABLED
15090  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15091  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15092  for(;;)
15093  {
15094  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15095  if(localLastUseFrameIndex == localCurrFrameIndex)
15096  {
15097  break;
15098  }
15099  else // Last use time earlier than current time.
15100  {
15101  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15102  {
15103  localLastUseFrameIndex = localCurrFrameIndex;
15104  }
15105  }
15106  }
15107 #endif
15108 
15109  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15110  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15111  pAllocationInfo->offset = hAllocation->GetOffset();
15112  pAllocationInfo->size = hAllocation->GetSize();
15113  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15114  pAllocationInfo->pUserData = hAllocation->GetUserData();
15115  }
15116 }
15117 
15118 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15119 {
15120  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15121  if(hAllocation->CanBecomeLost())
15122  {
15123  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15124  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15125  for(;;)
15126  {
15127  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15128  {
15129  return false;
15130  }
15131  else if(localLastUseFrameIndex == localCurrFrameIndex)
15132  {
15133  return true;
15134  }
15135  else // Last use time earlier than current time.
15136  {
15137  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15138  {
15139  localLastUseFrameIndex = localCurrFrameIndex;
15140  }
15141  }
15142  }
15143  }
15144  else
15145  {
15146 #if VMA_STATS_STRING_ENABLED
15147  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15148  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15149  for(;;)
15150  {
15151  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15152  if(localLastUseFrameIndex == localCurrFrameIndex)
15153  {
15154  break;
15155  }
15156  else // Last use time earlier than current time.
15157  {
15158  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15159  {
15160  localLastUseFrameIndex = localCurrFrameIndex;
15161  }
15162  }
15163  }
15164 #endif
15165 
15166  return true;
15167  }
15168 }
15169 
15170 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15171 {
15172  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15173 
15174  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15175 
15176  if(newCreateInfo.maxBlockCount == 0)
15177  {
15178  newCreateInfo.maxBlockCount = SIZE_MAX;
15179  }
15180  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15181  {
15182  return VK_ERROR_INITIALIZATION_FAILED;
15183  }
15184 
15185  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15186 
15187  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15188 
15189  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15190  if(res != VK_SUCCESS)
15191  {
15192  vma_delete(this, *pPool);
15193  *pPool = VMA_NULL;
15194  return res;
15195  }
15196 
15197  // Add to m_Pools.
15198  {
15199  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15200  (*pPool)->SetId(m_NextPoolId++);
15201  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15202  }
15203 
15204  return VK_SUCCESS;
15205 }
15206 
15207 void VmaAllocator_T::DestroyPool(VmaPool pool)
15208 {
15209  // Remove from m_Pools.
15210  {
15211  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15212  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15213  VMA_ASSERT(success && "Pool not found in Allocator.");
15214  }
15215 
15216  vma_delete(this, pool);
15217 }
15218 
15219 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15220 {
15221  pool->m_BlockVector.GetPoolStats(pPoolStats);
15222 }
15223 
15224 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15225 {
15226  m_CurrentFrameIndex.store(frameIndex);
15227 }
15228 
15229 void VmaAllocator_T::MakePoolAllocationsLost(
15230  VmaPool hPool,
15231  size_t* pLostAllocationCount)
15232 {
15233  hPool->m_BlockVector.MakePoolAllocationsLost(
15234  m_CurrentFrameIndex.load(),
15235  pLostAllocationCount);
15236 }
15237 
15238 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15239 {
15240  return hPool->m_BlockVector.CheckCorruption();
15241 }
15242 
15243 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15244 {
15245  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15246 
15247  // Process default pools.
15248  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15249  {
15250  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15251  {
15252  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15253  VMA_ASSERT(pBlockVector);
15254  VkResult localRes = pBlockVector->CheckCorruption();
15255  switch(localRes)
15256  {
15257  case VK_ERROR_FEATURE_NOT_PRESENT:
15258  break;
15259  case VK_SUCCESS:
15260  finalRes = VK_SUCCESS;
15261  break;
15262  default:
15263  return localRes;
15264  }
15265  }
15266  }
15267 
15268  // Process custom pools.
15269  {
15270  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15271  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15272  {
15273  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15274  {
15275  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15276  switch(localRes)
15277  {
15278  case VK_ERROR_FEATURE_NOT_PRESENT:
15279  break;
15280  case VK_SUCCESS:
15281  finalRes = VK_SUCCESS;
15282  break;
15283  default:
15284  return localRes;
15285  }
15286  }
15287  }
15288  }
15289 
15290  return finalRes;
15291 }
15292 
15293 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15294 {
15295  *pAllocation = m_AllocationObjectAllocator.Allocate();
15296  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15297  (*pAllocation)->InitLost();
15298 }
15299 
15300 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15301 {
15302  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15303 
15304  VkResult res;
15305  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15306  {
15307  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15308  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15309  {
15310  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15311  if(res == VK_SUCCESS)
15312  {
15313  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15314  }
15315  }
15316  else
15317  {
15318  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15319  }
15320  }
15321  else
15322  {
15323  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15324  }
15325 
15326  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15327  {
15328  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15329  }
15330 
15331  return res;
15332 }
15333 
15334 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15335 {
15336  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15337  {
15338  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15339  }
15340 
15341  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15342 
15343  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15344  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15345  {
15346  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15347  m_HeapSizeLimit[heapIndex] += size;
15348  }
15349 }
15350 
15351 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15352 {
15353  if(hAllocation->CanBecomeLost())
15354  {
15355  return VK_ERROR_MEMORY_MAP_FAILED;
15356  }
15357 
15358  switch(hAllocation->GetType())
15359  {
15360  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15361  {
15362  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15363  char *pBytes = VMA_NULL;
15364  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15365  if(res == VK_SUCCESS)
15366  {
15367  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15368  hAllocation->BlockAllocMap();
15369  }
15370  return res;
15371  }
15372  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15373  return hAllocation->DedicatedAllocMap(this, ppData);
15374  default:
15375  VMA_ASSERT(0);
15376  return VK_ERROR_MEMORY_MAP_FAILED;
15377  }
15378 }
15379 
15380 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15381 {
15382  switch(hAllocation->GetType())
15383  {
15384  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15385  {
15386  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15387  hAllocation->BlockAllocUnmap();
15388  pBlock->Unmap(this, 1);
15389  }
15390  break;
15391  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15392  hAllocation->DedicatedAllocUnmap(this);
15393  break;
15394  default:
15395  VMA_ASSERT(0);
15396  }
15397 }
15398 
15399 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15400 {
15401  VkResult res = VK_SUCCESS;
15402  switch(hAllocation->GetType())
15403  {
15404  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15405  res = GetVulkanFunctions().vkBindBufferMemory(
15406  m_hDevice,
15407  hBuffer,
15408  hAllocation->GetMemory(),
15409  0); //memoryOffset
15410  break;
15411  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15412  {
15413  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15414  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15415  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15416  break;
15417  }
15418  default:
15419  VMA_ASSERT(0);
15420  }
15421  return res;
15422 }
15423 
15424 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15425 {
15426  VkResult res = VK_SUCCESS;
15427  switch(hAllocation->GetType())
15428  {
15429  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15430  res = GetVulkanFunctions().vkBindImageMemory(
15431  m_hDevice,
15432  hImage,
15433  hAllocation->GetMemory(),
15434  0); //memoryOffset
15435  break;
15436  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15437  {
15438  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15439  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15440  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15441  break;
15442  }
15443  default:
15444  VMA_ASSERT(0);
15445  }
15446  return res;
15447 }
15448 
15449 void VmaAllocator_T::FlushOrInvalidateAllocation(
15450  VmaAllocation hAllocation,
15451  VkDeviceSize offset, VkDeviceSize size,
15452  VMA_CACHE_OPERATION op)
15453 {
15454  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15455  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15456  {
15457  const VkDeviceSize allocationSize = hAllocation->GetSize();
15458  VMA_ASSERT(offset <= allocationSize);
15459 
15460  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15461 
15462  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15463  memRange.memory = hAllocation->GetMemory();
15464 
15465  switch(hAllocation->GetType())
15466  {
15467  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15468  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15469  if(size == VK_WHOLE_SIZE)
15470  {
15471  memRange.size = allocationSize - memRange.offset;
15472  }
15473  else
15474  {
15475  VMA_ASSERT(offset + size <= allocationSize);
15476  memRange.size = VMA_MIN(
15477  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15478  allocationSize - memRange.offset);
15479  }
15480  break;
15481 
15482  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15483  {
15484  // 1. Still within this allocation.
15485  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15486  if(size == VK_WHOLE_SIZE)
15487  {
15488  size = allocationSize - offset;
15489  }
15490  else
15491  {
15492  VMA_ASSERT(offset + size <= allocationSize);
15493  }
15494  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15495 
15496  // 2. Adjust to whole block.
15497  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15498  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15499  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15500  memRange.offset += allocationOffset;
15501  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15502 
15503  break;
15504  }
15505 
15506  default:
15507  VMA_ASSERT(0);
15508  }
15509 
15510  switch(op)
15511  {
15512  case VMA_CACHE_FLUSH:
15513  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15514  break;
15515  case VMA_CACHE_INVALIDATE:
15516  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15517  break;
15518  default:
15519  VMA_ASSERT(0);
15520  }
15521  }
15522  // else: Just ignore this call.
15523 }
15524 
15525 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15526 {
15527  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15528 
15529  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15530  {
15531  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15532  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15533  VMA_ASSERT(pDedicatedAllocations);
15534  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15535  VMA_ASSERT(success);
15536  }
15537 
15538  VkDeviceMemory hMemory = allocation->GetMemory();
15539 
15540  /*
15541  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
15542  before vkFreeMemory.
15543 
15544  if(allocation->GetMappedData() != VMA_NULL)
15545  {
15546  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15547  }
15548  */
15549 
15550  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15551 
15552  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15553 }
15554 
15555 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15556 {
15557  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15558  !hAllocation->CanBecomeLost() &&
15559  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15560  {
15561  void* pData = VMA_NULL;
15562  VkResult res = Map(hAllocation, &pData);
15563  if(res == VK_SUCCESS)
15564  {
15565  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15566  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15567  Unmap(hAllocation);
15568  }
15569  else
15570  {
15571  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15572  }
15573  }
15574 }
15575 
15576 #if VMA_STATS_STRING_ENABLED
15577 
15578 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15579 {
15580  bool dedicatedAllocationsStarted = false;
15581  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15582  {
15583  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15584  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15585  VMA_ASSERT(pDedicatedAllocVector);
15586  if(pDedicatedAllocVector->empty() == false)
15587  {
15588  if(dedicatedAllocationsStarted == false)
15589  {
15590  dedicatedAllocationsStarted = true;
15591  json.WriteString("DedicatedAllocations");
15592  json.BeginObject();
15593  }
15594 
15595  json.BeginString("Type ");
15596  json.ContinueString(memTypeIndex);
15597  json.EndString();
15598 
15599  json.BeginArray();
15600 
15601  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15602  {
15603  json.BeginObject(true);
15604  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15605  hAlloc->PrintParameters(json);
15606  json.EndObject();
15607  }
15608 
15609  json.EndArray();
15610  }
15611  }
15612  if(dedicatedAllocationsStarted)
15613  {
15614  json.EndObject();
15615  }
15616 
15617  {
15618  bool allocationsStarted = false;
15619  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15620  {
15621  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15622  {
15623  if(allocationsStarted == false)
15624  {
15625  allocationsStarted = true;
15626  json.WriteString("DefaultPools");
15627  json.BeginObject();
15628  }
15629 
15630  json.BeginString("Type ");
15631  json.ContinueString(memTypeIndex);
15632  json.EndString();
15633 
15634  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15635  }
15636  }
15637  if(allocationsStarted)
15638  {
15639  json.EndObject();
15640  }
15641  }
15642 
15643  // Custom pools
15644  {
15645  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15646  const size_t poolCount = m_Pools.size();
15647  if(poolCount > 0)
15648  {
15649  json.WriteString("Pools");
15650  json.BeginObject();
15651  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15652  {
15653  json.BeginString();
15654  json.ContinueString(m_Pools[poolIndex]->GetId());
15655  json.EndString();
15656 
15657  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15658  }
15659  json.EndObject();
15660  }
15661  }
15662 }
15663 
15664 #endif // #if VMA_STATS_STRING_ENABLED
15665 
15667 // Public interface
15668 
15669 VkResult vmaCreateAllocator(
15670  const VmaAllocatorCreateInfo* pCreateInfo,
15671  VmaAllocator* pAllocator)
15672 {
15673  VMA_ASSERT(pCreateInfo && pAllocator);
15674  VMA_DEBUG_LOG("vmaCreateAllocator");
15675  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15676  return (*pAllocator)->Init(pCreateInfo);
15677 }
15678 
15679 void vmaDestroyAllocator(
15680  VmaAllocator allocator)
15681 {
15682  if(allocator != VK_NULL_HANDLE)
15683  {
15684  VMA_DEBUG_LOG("vmaDestroyAllocator");
15685  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15686  vma_delete(&allocationCallbacks, allocator);
15687  }
15688 }
15689 
15691  VmaAllocator allocator,
15692  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15693 {
15694  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15695  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15696 }
15697 
15699  VmaAllocator allocator,
15700  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15701 {
15702  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15703  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15704 }
15705 
15707  VmaAllocator allocator,
15708  uint32_t memoryTypeIndex,
15709  VkMemoryPropertyFlags* pFlags)
15710 {
15711  VMA_ASSERT(allocator && pFlags);
15712  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15713  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15714 }
15715 
15717  VmaAllocator allocator,
15718  uint32_t frameIndex)
15719 {
15720  VMA_ASSERT(allocator);
15721  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15722 
15723  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15724 
15725  allocator->SetCurrentFrameIndex(frameIndex);
15726 }
15727 
15728 void vmaCalculateStats(
15729  VmaAllocator allocator,
15730  VmaStats* pStats)
15731 {
15732  VMA_ASSERT(allocator && pStats);
15733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15734  allocator->CalculateStats(pStats);
15735 }
15736 
15737 #if VMA_STATS_STRING_ENABLED
15738 
15739 void vmaBuildStatsString(
15740  VmaAllocator allocator,
15741  char** ppStatsString,
15742  VkBool32 detailedMap)
15743 {
15744  VMA_ASSERT(allocator && ppStatsString);
15745  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15746 
15747  VmaStringBuilder sb(allocator);
15748  {
15749  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15750  json.BeginObject();
15751 
15752  VmaStats stats;
15753  allocator->CalculateStats(&stats);
15754 
15755  json.WriteString("Total");
15756  VmaPrintStatInfo(json, stats.total);
15757 
15758  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15759  {
15760  json.BeginString("Heap ");
15761  json.ContinueString(heapIndex);
15762  json.EndString();
15763  json.BeginObject();
15764 
15765  json.WriteString("Size");
15766  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15767 
15768  json.WriteString("Flags");
15769  json.BeginArray(true);
15770  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15771  {
15772  json.WriteString("DEVICE_LOCAL");
15773  }
15774  json.EndArray();
15775 
15776  if(stats.memoryHeap[heapIndex].blockCount > 0)
15777  {
15778  json.WriteString("Stats");
15779  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15780  }
15781 
15782  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15783  {
15784  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15785  {
15786  json.BeginString("Type ");
15787  json.ContinueString(typeIndex);
15788  json.EndString();
15789 
15790  json.BeginObject();
15791 
15792  json.WriteString("Flags");
15793  json.BeginArray(true);
15794  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15795  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15796  {
15797  json.WriteString("DEVICE_LOCAL");
15798  }
15799  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15800  {
15801  json.WriteString("HOST_VISIBLE");
15802  }
15803  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15804  {
15805  json.WriteString("HOST_COHERENT");
15806  }
15807  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15808  {
15809  json.WriteString("HOST_CACHED");
15810  }
15811  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15812  {
15813  json.WriteString("LAZILY_ALLOCATED");
15814  }
15815  json.EndArray();
15816 
15817  if(stats.memoryType[typeIndex].blockCount > 0)
15818  {
15819  json.WriteString("Stats");
15820  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15821  }
15822 
15823  json.EndObject();
15824  }
15825  }
15826 
15827  json.EndObject();
15828  }
15829  if(detailedMap == VK_TRUE)
15830  {
15831  allocator->PrintDetailedMap(json);
15832  }
15833 
15834  json.EndObject();
15835  }
15836 
15837  const size_t len = sb.GetLength();
15838  char* const pChars = vma_new_array(allocator, char, len + 1);
15839  if(len > 0)
15840  {
15841  memcpy(pChars, sb.GetData(), len);
15842  }
15843  pChars[len] = '\0';
15844  *ppStatsString = pChars;
15845 }
15846 
15847 void vmaFreeStatsString(
15848  VmaAllocator allocator,
15849  char* pStatsString)
15850 {
15851  if(pStatsString != VMA_NULL)
15852  {
15853  VMA_ASSERT(allocator);
15854  size_t len = strlen(pStatsString);
15855  vma_delete_array(allocator, pStatsString, len + 1);
15856  }
15857 }
15858 
15859 #endif // #if VMA_STATS_STRING_ENABLED
15860 
15861 /*
15862 This function is not protected by any mutex because it just reads immutable data.
15863 */
15864 VkResult vmaFindMemoryTypeIndex(
15865  VmaAllocator allocator,
15866  uint32_t memoryTypeBits,
15867  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15868  uint32_t* pMemoryTypeIndex)
15869 {
15870  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15871  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15872  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15873 
15874  if(pAllocationCreateInfo->memoryTypeBits != 0)
15875  {
15876  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15877  }
15878 
15879  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15880  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15881 
15882  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15883  if(mapped)
15884  {
15885  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15886  }
15887 
15888  // Convert usage to requiredFlags and preferredFlags.
15889  switch(pAllocationCreateInfo->usage)
15890  {
15892  break;
15894  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15895  {
15896  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15897  }
15898  break;
15900  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15901  break;
15903  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15904  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15905  {
15906  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15907  }
15908  break;
15910  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15911  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15912  break;
15913  default:
15914  break;
15915  }
15916 
15917  *pMemoryTypeIndex = UINT32_MAX;
15918  uint32_t minCost = UINT32_MAX;
15919  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15920  memTypeIndex < allocator->GetMemoryTypeCount();
15921  ++memTypeIndex, memTypeBit <<= 1)
15922  {
15923  // This memory type is acceptable according to memoryTypeBits bitmask.
15924  if((memTypeBit & memoryTypeBits) != 0)
15925  {
15926  const VkMemoryPropertyFlags currFlags =
15927  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15928  // This memory type contains requiredFlags.
15929  if((requiredFlags & ~currFlags) == 0)
15930  {
15931  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15932  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15933  // Remember memory type with lowest cost.
15934  if(currCost < minCost)
15935  {
15936  *pMemoryTypeIndex = memTypeIndex;
15937  if(currCost == 0)
15938  {
15939  return VK_SUCCESS;
15940  }
15941  minCost = currCost;
15942  }
15943  }
15944  }
15945  }
15946  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15947 }
15948 
15950  VmaAllocator allocator,
15951  const VkBufferCreateInfo* pBufferCreateInfo,
15952  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15953  uint32_t* pMemoryTypeIndex)
15954 {
15955  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15956  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15957  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15958  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15959 
15960  const VkDevice hDev = allocator->m_hDevice;
15961  VkBuffer hBuffer = VK_NULL_HANDLE;
15962  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15963  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15964  if(res == VK_SUCCESS)
15965  {
15966  VkMemoryRequirements memReq = {};
15967  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15968  hDev, hBuffer, &memReq);
15969 
15970  res = vmaFindMemoryTypeIndex(
15971  allocator,
15972  memReq.memoryTypeBits,
15973  pAllocationCreateInfo,
15974  pMemoryTypeIndex);
15975 
15976  allocator->GetVulkanFunctions().vkDestroyBuffer(
15977  hDev, hBuffer, allocator->GetAllocationCallbacks());
15978  }
15979  return res;
15980 }
15981 
15983  VmaAllocator allocator,
15984  const VkImageCreateInfo* pImageCreateInfo,
15985  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15986  uint32_t* pMemoryTypeIndex)
15987 {
15988  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15989  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15990  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15991  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15992 
15993  const VkDevice hDev = allocator->m_hDevice;
15994  VkImage hImage = VK_NULL_HANDLE;
15995  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15996  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15997  if(res == VK_SUCCESS)
15998  {
15999  VkMemoryRequirements memReq = {};
16000  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16001  hDev, hImage, &memReq);
16002 
16003  res = vmaFindMemoryTypeIndex(
16004  allocator,
16005  memReq.memoryTypeBits,
16006  pAllocationCreateInfo,
16007  pMemoryTypeIndex);
16008 
16009  allocator->GetVulkanFunctions().vkDestroyImage(
16010  hDev, hImage, allocator->GetAllocationCallbacks());
16011  }
16012  return res;
16013 }
16014 
16015 VkResult vmaCreatePool(
16016  VmaAllocator allocator,
16017  const VmaPoolCreateInfo* pCreateInfo,
16018  VmaPool* pPool)
16019 {
16020  VMA_ASSERT(allocator && pCreateInfo && pPool);
16021 
16022  VMA_DEBUG_LOG("vmaCreatePool");
16023 
16024  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16025 
16026  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16027 
16028 #if VMA_RECORDING_ENABLED
16029  if(allocator->GetRecorder() != VMA_NULL)
16030  {
16031  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16032  }
16033 #endif
16034 
16035  return res;
16036 }
16037 
16038 void vmaDestroyPool(
16039  VmaAllocator allocator,
16040  VmaPool pool)
16041 {
16042  VMA_ASSERT(allocator);
16043 
16044  if(pool == VK_NULL_HANDLE)
16045  {
16046  return;
16047  }
16048 
16049  VMA_DEBUG_LOG("vmaDestroyPool");
16050 
16051  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16052 
16053 #if VMA_RECORDING_ENABLED
16054  if(allocator->GetRecorder() != VMA_NULL)
16055  {
16056  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16057  }
16058 #endif
16059 
16060  allocator->DestroyPool(pool);
16061 }
16062 
16063 void vmaGetPoolStats(
16064  VmaAllocator allocator,
16065  VmaPool pool,
16066  VmaPoolStats* pPoolStats)
16067 {
16068  VMA_ASSERT(allocator && pool && pPoolStats);
16069 
16070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16071 
16072  allocator->GetPoolStats(pool, pPoolStats);
16073 }
16074 
16076  VmaAllocator allocator,
16077  VmaPool pool,
16078  size_t* pLostAllocationCount)
16079 {
16080  VMA_ASSERT(allocator && pool);
16081 
16082  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16083 
16084 #if VMA_RECORDING_ENABLED
16085  if(allocator->GetRecorder() != VMA_NULL)
16086  {
16087  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16088  }
16089 #endif
16090 
16091  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16092 }
16093 
16094 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16095 {
16096  VMA_ASSERT(allocator && pool);
16097 
16098  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16099 
16100  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16101 
16102  return allocator->CheckPoolCorruption(pool);
16103 }
16104 
16105 VkResult vmaAllocateMemory(
16106  VmaAllocator allocator,
16107  const VkMemoryRequirements* pVkMemoryRequirements,
16108  const VmaAllocationCreateInfo* pCreateInfo,
16109  VmaAllocation* pAllocation,
16110  VmaAllocationInfo* pAllocationInfo)
16111 {
16112  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16113 
16114  VMA_DEBUG_LOG("vmaAllocateMemory");
16115 
16116  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16117 
16118  VkResult result = allocator->AllocateMemory(
16119  *pVkMemoryRequirements,
16120  false, // requiresDedicatedAllocation
16121  false, // prefersDedicatedAllocation
16122  VK_NULL_HANDLE, // dedicatedBuffer
16123  VK_NULL_HANDLE, // dedicatedImage
16124  *pCreateInfo,
16125  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16126  1, // allocationCount
16127  pAllocation);
16128 
16129 #if VMA_RECORDING_ENABLED
16130  if(allocator->GetRecorder() != VMA_NULL)
16131  {
16132  allocator->GetRecorder()->RecordAllocateMemory(
16133  allocator->GetCurrentFrameIndex(),
16134  *pVkMemoryRequirements,
16135  *pCreateInfo,
16136  *pAllocation);
16137  }
16138 #endif
16139 
16140  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16141  {
16142  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16143  }
16144 
16145  return result;
16146 }
16147 
16148 VkResult vmaAllocateMemoryPages(
16149  VmaAllocator allocator,
16150  const VkMemoryRequirements* pVkMemoryRequirements,
16151  const VmaAllocationCreateInfo* pCreateInfo,
16152  size_t allocationCount,
16153  VmaAllocation* pAllocations,
16154  VmaAllocationInfo* pAllocationInfo)
16155 {
16156  if(allocationCount == 0)
16157  {
16158  return VK_SUCCESS;
16159  }
16160 
16161  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16162 
16163  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16164 
16165  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16166 
16167  VkResult result = allocator->AllocateMemory(
16168  *pVkMemoryRequirements,
16169  false, // requiresDedicatedAllocation
16170  false, // prefersDedicatedAllocation
16171  VK_NULL_HANDLE, // dedicatedBuffer
16172  VK_NULL_HANDLE, // dedicatedImage
16173  *pCreateInfo,
16174  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16175  allocationCount,
16176  pAllocations);
16177 
16178 #if VMA_RECORDING_ENABLED
16179  if(allocator->GetRecorder() != VMA_NULL)
16180  {
16181  allocator->GetRecorder()->RecordAllocateMemoryPages(
16182  allocator->GetCurrentFrameIndex(),
16183  *pVkMemoryRequirements,
16184  *pCreateInfo,
16185  (uint64_t)allocationCount,
16186  pAllocations);
16187  }
16188 #endif
16189 
16190  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16191  {
16192  for(size_t i = 0; i < allocationCount; ++i)
16193  {
16194  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16195  }
16196  }
16197 
16198  return result;
16199 }
16200 
16202  VmaAllocator allocator,
16203  VkBuffer buffer,
16204  const VmaAllocationCreateInfo* pCreateInfo,
16205  VmaAllocation* pAllocation,
16206  VmaAllocationInfo* pAllocationInfo)
16207 {
16208  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16209 
16210  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16211 
16212  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16213 
16214  VkMemoryRequirements vkMemReq = {};
16215  bool requiresDedicatedAllocation = false;
16216  bool prefersDedicatedAllocation = false;
16217  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16218  requiresDedicatedAllocation,
16219  prefersDedicatedAllocation);
16220 
16221  VkResult result = allocator->AllocateMemory(
16222  vkMemReq,
16223  requiresDedicatedAllocation,
16224  prefersDedicatedAllocation,
16225  buffer, // dedicatedBuffer
16226  VK_NULL_HANDLE, // dedicatedImage
16227  *pCreateInfo,
16228  VMA_SUBALLOCATION_TYPE_BUFFER,
16229  1, // allocationCount
16230  pAllocation);
16231 
16232 #if VMA_RECORDING_ENABLED
16233  if(allocator->GetRecorder() != VMA_NULL)
16234  {
16235  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16236  allocator->GetCurrentFrameIndex(),
16237  vkMemReq,
16238  requiresDedicatedAllocation,
16239  prefersDedicatedAllocation,
16240  *pCreateInfo,
16241  *pAllocation);
16242  }
16243 #endif
16244 
16245  if(pAllocationInfo && result == VK_SUCCESS)
16246  {
16247  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16248  }
16249 
16250  return result;
16251 }
16252 
16253 VkResult vmaAllocateMemoryForImage(
16254  VmaAllocator allocator,
16255  VkImage image,
16256  const VmaAllocationCreateInfo* pCreateInfo,
16257  VmaAllocation* pAllocation,
16258  VmaAllocationInfo* pAllocationInfo)
16259 {
16260  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16261 
16262  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16263 
16264  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16265 
16266  VkMemoryRequirements vkMemReq = {};
16267  bool requiresDedicatedAllocation = false;
16268  bool prefersDedicatedAllocation = false;
16269  allocator->GetImageMemoryRequirements(image, vkMemReq,
16270  requiresDedicatedAllocation, prefersDedicatedAllocation);
16271 
16272  VkResult result = allocator->AllocateMemory(
16273  vkMemReq,
16274  requiresDedicatedAllocation,
16275  prefersDedicatedAllocation,
16276  VK_NULL_HANDLE, // dedicatedBuffer
16277  image, // dedicatedImage
16278  *pCreateInfo,
16279  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16280  1, // allocationCount
16281  pAllocation);
16282 
16283 #if VMA_RECORDING_ENABLED
16284  if(allocator->GetRecorder() != VMA_NULL)
16285  {
16286  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16287  allocator->GetCurrentFrameIndex(),
16288  vkMemReq,
16289  requiresDedicatedAllocation,
16290  prefersDedicatedAllocation,
16291  *pCreateInfo,
16292  *pAllocation);
16293  }
16294 #endif
16295 
16296  if(pAllocationInfo && result == VK_SUCCESS)
16297  {
16298  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16299  }
16300 
16301  return result;
16302 }
16303 
16304 void vmaFreeMemory(
16305  VmaAllocator allocator,
16306  VmaAllocation allocation)
16307 {
16308  VMA_ASSERT(allocator);
16309 
16310  if(allocation == VK_NULL_HANDLE)
16311  {
16312  return;
16313  }
16314 
16315  VMA_DEBUG_LOG("vmaFreeMemory");
16316 
16317  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16318 
16319 #if VMA_RECORDING_ENABLED
16320  if(allocator->GetRecorder() != VMA_NULL)
16321  {
16322  allocator->GetRecorder()->RecordFreeMemory(
16323  allocator->GetCurrentFrameIndex(),
16324  allocation);
16325  }
16326 #endif
16327 
16328  allocator->FreeMemory(
16329  1, // allocationCount
16330  &allocation);
16331 }
16332 
16333 void vmaFreeMemoryPages(
16334  VmaAllocator allocator,
16335  size_t allocationCount,
16336  VmaAllocation* pAllocations)
16337 {
16338  if(allocationCount == 0)
16339  {
16340  return;
16341  }
16342 
16343  VMA_ASSERT(allocator);
16344 
16345  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16346 
16347  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16348 
16349 #if VMA_RECORDING_ENABLED
16350  if(allocator->GetRecorder() != VMA_NULL)
16351  {
16352  allocator->GetRecorder()->RecordFreeMemoryPages(
16353  allocator->GetCurrentFrameIndex(),
16354  (uint64_t)allocationCount,
16355  pAllocations);
16356  }
16357 #endif
16358 
16359  allocator->FreeMemory(allocationCount, pAllocations);
16360 }
16361 
16362 VkResult vmaResizeAllocation(
16363  VmaAllocator allocator,
16364  VmaAllocation allocation,
16365  VkDeviceSize newSize)
16366 {
16367  VMA_ASSERT(allocator && allocation);
16368 
16369  VMA_DEBUG_LOG("vmaResizeAllocation");
16370 
16371  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16372 
16373 #if VMA_RECORDING_ENABLED
16374  if(allocator->GetRecorder() != VMA_NULL)
16375  {
16376  allocator->GetRecorder()->RecordResizeAllocation(
16377  allocator->GetCurrentFrameIndex(),
16378  allocation,
16379  newSize);
16380  }
16381 #endif
16382 
16383  return allocator->ResizeAllocation(allocation, newSize);
16384 }
16385 
16387  VmaAllocator allocator,
16388  VmaAllocation allocation,
16389  VmaAllocationInfo* pAllocationInfo)
16390 {
16391  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16392 
16393  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16394 
16395 #if VMA_RECORDING_ENABLED
16396  if(allocator->GetRecorder() != VMA_NULL)
16397  {
16398  allocator->GetRecorder()->RecordGetAllocationInfo(
16399  allocator->GetCurrentFrameIndex(),
16400  allocation);
16401  }
16402 #endif
16403 
16404  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16405 }
16406 
16407 VkBool32 vmaTouchAllocation(
16408  VmaAllocator allocator,
16409  VmaAllocation allocation)
16410 {
16411  VMA_ASSERT(allocator && allocation);
16412 
16413  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16414 
16415 #if VMA_RECORDING_ENABLED
16416  if(allocator->GetRecorder() != VMA_NULL)
16417  {
16418  allocator->GetRecorder()->RecordTouchAllocation(
16419  allocator->GetCurrentFrameIndex(),
16420  allocation);
16421  }
16422 #endif
16423 
16424  return allocator->TouchAllocation(allocation);
16425 }
16426 
16428  VmaAllocator allocator,
16429  VmaAllocation allocation,
16430  void* pUserData)
16431 {
16432  VMA_ASSERT(allocator && allocation);
16433 
16434  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16435 
16436  allocation->SetUserData(allocator, pUserData);
16437 
16438 #if VMA_RECORDING_ENABLED
16439  if(allocator->GetRecorder() != VMA_NULL)
16440  {
16441  allocator->GetRecorder()->RecordSetAllocationUserData(
16442  allocator->GetCurrentFrameIndex(),
16443  allocation,
16444  pUserData);
16445  }
16446 #endif
16447 }
16448 
16450  VmaAllocator allocator,
16451  VmaAllocation* pAllocation)
16452 {
16453  VMA_ASSERT(allocator && pAllocation);
16454 
16455  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16456 
16457  allocator->CreateLostAllocation(pAllocation);
16458 
16459 #if VMA_RECORDING_ENABLED
16460  if(allocator->GetRecorder() != VMA_NULL)
16461  {
16462  allocator->GetRecorder()->RecordCreateLostAllocation(
16463  allocator->GetCurrentFrameIndex(),
16464  *pAllocation);
16465  }
16466 #endif
16467 }
16468 
16469 VkResult vmaMapMemory(
16470  VmaAllocator allocator,
16471  VmaAllocation allocation,
16472  void** ppData)
16473 {
16474  VMA_ASSERT(allocator && allocation && ppData);
16475 
16476  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16477 
16478  VkResult res = allocator->Map(allocation, ppData);
16479 
16480 #if VMA_RECORDING_ENABLED
16481  if(allocator->GetRecorder() != VMA_NULL)
16482  {
16483  allocator->GetRecorder()->RecordMapMemory(
16484  allocator->GetCurrentFrameIndex(),
16485  allocation);
16486  }
16487 #endif
16488 
16489  return res;
16490 }
16491 
16492 void vmaUnmapMemory(
16493  VmaAllocator allocator,
16494  VmaAllocation allocation)
16495 {
16496  VMA_ASSERT(allocator && allocation);
16497 
16498  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16499 
16500 #if VMA_RECORDING_ENABLED
16501  if(allocator->GetRecorder() != VMA_NULL)
16502  {
16503  allocator->GetRecorder()->RecordUnmapMemory(
16504  allocator->GetCurrentFrameIndex(),
16505  allocation);
16506  }
16507 #endif
16508 
16509  allocator->Unmap(allocation);
16510 }
16511 
16512 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16513 {
16514  VMA_ASSERT(allocator && allocation);
16515 
16516  VMA_DEBUG_LOG("vmaFlushAllocation");
16517 
16518  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16519 
16520  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16521 
16522 #if VMA_RECORDING_ENABLED
16523  if(allocator->GetRecorder() != VMA_NULL)
16524  {
16525  allocator->GetRecorder()->RecordFlushAllocation(
16526  allocator->GetCurrentFrameIndex(),
16527  allocation, offset, size);
16528  }
16529 #endif
16530 }
16531 
16532 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16533 {
16534  VMA_ASSERT(allocator && allocation);
16535 
16536  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16537 
16538  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16539 
16540  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16541 
16542 #if VMA_RECORDING_ENABLED
16543  if(allocator->GetRecorder() != VMA_NULL)
16544  {
16545  allocator->GetRecorder()->RecordInvalidateAllocation(
16546  allocator->GetCurrentFrameIndex(),
16547  allocation, offset, size);
16548  }
16549 #endif
16550 }
16551 
16552 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16553 {
16554  VMA_ASSERT(allocator);
16555 
16556  VMA_DEBUG_LOG("vmaCheckCorruption");
16557 
16558  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16559 
16560  return allocator->CheckCorruption(memoryTypeBits);
16561 }
16562 
16563 VkResult vmaDefragment(
16564  VmaAllocator allocator,
16565  VmaAllocation* pAllocations,
16566  size_t allocationCount,
16567  VkBool32* pAllocationsChanged,
16568  const VmaDefragmentationInfo *pDefragmentationInfo,
16569  VmaDefragmentationStats* pDefragmentationStats)
16570 {
16571  // Deprecated interface, reimplemented using new one.
16572 
16573  VmaDefragmentationInfo2 info2 = {};
16574  info2.allocationCount = (uint32_t)allocationCount;
16575  info2.pAllocations = pAllocations;
16576  info2.pAllocationsChanged = pAllocationsChanged;
16577  if(pDefragmentationInfo != VMA_NULL)
16578  {
16579  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16580  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16581  }
16582  else
16583  {
16584  info2.maxCpuAllocationsToMove = UINT32_MAX;
16585  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16586  }
16587  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16588 
16590  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16591  if(res == VK_NOT_READY)
16592  {
16593  res = vmaDefragmentationEnd( allocator, ctx);
16594  }
16595  return res;
16596 }
16597 
16598 VkResult vmaDefragmentationBegin(
16599  VmaAllocator allocator,
16600  const VmaDefragmentationInfo2* pInfo,
16601  VmaDefragmentationStats* pStats,
16602  VmaDefragmentationContext *pContext)
16603 {
16604  VMA_ASSERT(allocator && pInfo && pContext);
16605 
16606  // Degenerate case: Nothing to defragment.
16607  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16608  {
16609  return VK_SUCCESS;
16610  }
16611 
16612  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16613  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16614  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16615  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16616 
16617  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16618 
16619  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16620 
16621  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16622 
16623 #if VMA_RECORDING_ENABLED
16624  if(allocator->GetRecorder() != VMA_NULL)
16625  {
16626  allocator->GetRecorder()->RecordDefragmentationBegin(
16627  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16628  }
16629 #endif
16630 
16631  return res;
16632 }
16633 
16634 VkResult vmaDefragmentationEnd(
16635  VmaAllocator allocator,
16636  VmaDefragmentationContext context)
16637 {
16638  VMA_ASSERT(allocator);
16639 
16640  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16641 
16642  if(context != VK_NULL_HANDLE)
16643  {
16644  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16645 
16646 #if VMA_RECORDING_ENABLED
16647  if(allocator->GetRecorder() != VMA_NULL)
16648  {
16649  allocator->GetRecorder()->RecordDefragmentationEnd(
16650  allocator->GetCurrentFrameIndex(), context);
16651  }
16652 #endif
16653 
16654  return allocator->DefragmentationEnd(context);
16655  }
16656  else
16657  {
16658  return VK_SUCCESS;
16659  }
16660 }
16661 
16662 VkResult vmaBindBufferMemory(
16663  VmaAllocator allocator,
16664  VmaAllocation allocation,
16665  VkBuffer buffer)
16666 {
16667  VMA_ASSERT(allocator && allocation && buffer);
16668 
16669  VMA_DEBUG_LOG("vmaBindBufferMemory");
16670 
16671  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16672 
16673  return allocator->BindBufferMemory(allocation, buffer);
16674 }
16675 
16676 VkResult vmaBindImageMemory(
16677  VmaAllocator allocator,
16678  VmaAllocation allocation,
16679  VkImage image)
16680 {
16681  VMA_ASSERT(allocator && allocation && image);
16682 
16683  VMA_DEBUG_LOG("vmaBindImageMemory");
16684 
16685  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16686 
16687  return allocator->BindImageMemory(allocation, image);
16688 }
16689 
16690 VkResult vmaCreateBuffer(
16691  VmaAllocator allocator,
16692  const VkBufferCreateInfo* pBufferCreateInfo,
16693  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16694  VkBuffer* pBuffer,
16695  VmaAllocation* pAllocation,
16696  VmaAllocationInfo* pAllocationInfo)
16697 {
16698  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16699 
16700  if(pBufferCreateInfo->size == 0)
16701  {
16702  return VK_ERROR_VALIDATION_FAILED_EXT;
16703  }
16704 
16705  VMA_DEBUG_LOG("vmaCreateBuffer");
16706 
16707  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16708 
16709  *pBuffer = VK_NULL_HANDLE;
16710  *pAllocation = VK_NULL_HANDLE;
16711 
16712  // 1. Create VkBuffer.
16713  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16714  allocator->m_hDevice,
16715  pBufferCreateInfo,
16716  allocator->GetAllocationCallbacks(),
16717  pBuffer);
16718  if(res >= 0)
16719  {
16720  // 2. vkGetBufferMemoryRequirements.
16721  VkMemoryRequirements vkMemReq = {};
16722  bool requiresDedicatedAllocation = false;
16723  bool prefersDedicatedAllocation = false;
16724  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16725  requiresDedicatedAllocation, prefersDedicatedAllocation);
16726 
16727  // Make sure alignment requirements for specific buffer usages reported
16728  // in Physical Device Properties are included in alignment reported by memory requirements.
16729  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16730  {
16731  VMA_ASSERT(vkMemReq.alignment %
16732  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16733  }
16734  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16735  {
16736  VMA_ASSERT(vkMemReq.alignment %
16737  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16738  }
16739  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16740  {
16741  VMA_ASSERT(vkMemReq.alignment %
16742  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16743  }
16744 
16745  // 3. Allocate memory using allocator.
16746  res = allocator->AllocateMemory(
16747  vkMemReq,
16748  requiresDedicatedAllocation,
16749  prefersDedicatedAllocation,
16750  *pBuffer, // dedicatedBuffer
16751  VK_NULL_HANDLE, // dedicatedImage
16752  *pAllocationCreateInfo,
16753  VMA_SUBALLOCATION_TYPE_BUFFER,
16754  1, // allocationCount
16755  pAllocation);
16756 
16757 #if VMA_RECORDING_ENABLED
16758  if(allocator->GetRecorder() != VMA_NULL)
16759  {
16760  allocator->GetRecorder()->RecordCreateBuffer(
16761  allocator->GetCurrentFrameIndex(),
16762  *pBufferCreateInfo,
16763  *pAllocationCreateInfo,
16764  *pAllocation);
16765  }
16766 #endif
16767 
16768  if(res >= 0)
16769  {
16770  // 3. Bind buffer with memory.
16771  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16772  {
16773  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16774  }
16775  if(res >= 0)
16776  {
16777  // All steps succeeded.
16778  #if VMA_STATS_STRING_ENABLED
16779  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16780  #endif
16781  if(pAllocationInfo != VMA_NULL)
16782  {
16783  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16784  }
16785 
16786  return VK_SUCCESS;
16787  }
16788  allocator->FreeMemory(
16789  1, // allocationCount
16790  pAllocation);
16791  *pAllocation = VK_NULL_HANDLE;
16792  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16793  *pBuffer = VK_NULL_HANDLE;
16794  return res;
16795  }
16796  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16797  *pBuffer = VK_NULL_HANDLE;
16798  return res;
16799  }
16800  return res;
16801 }
16802 
16803 void vmaDestroyBuffer(
16804  VmaAllocator allocator,
16805  VkBuffer buffer,
16806  VmaAllocation allocation)
16807 {
16808  VMA_ASSERT(allocator);
16809 
16810  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16811  {
16812  return;
16813  }
16814 
16815  VMA_DEBUG_LOG("vmaDestroyBuffer");
16816 
16817  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16818 
16819 #if VMA_RECORDING_ENABLED
16820  if(allocator->GetRecorder() != VMA_NULL)
16821  {
16822  allocator->GetRecorder()->RecordDestroyBuffer(
16823  allocator->GetCurrentFrameIndex(),
16824  allocation);
16825  }
16826 #endif
16827 
16828  if(buffer != VK_NULL_HANDLE)
16829  {
16830  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16831  }
16832 
16833  if(allocation != VK_NULL_HANDLE)
16834  {
16835  allocator->FreeMemory(
16836  1, // allocationCount
16837  &allocation);
16838  }
16839 }
16840 
16841 VkResult vmaCreateImage(
16842  VmaAllocator allocator,
16843  const VkImageCreateInfo* pImageCreateInfo,
16844  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16845  VkImage* pImage,
16846  VmaAllocation* pAllocation,
16847  VmaAllocationInfo* pAllocationInfo)
16848 {
16849  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16850 
16851  if(pImageCreateInfo->extent.width == 0 ||
16852  pImageCreateInfo->extent.height == 0 ||
16853  pImageCreateInfo->extent.depth == 0 ||
16854  pImageCreateInfo->mipLevels == 0 ||
16855  pImageCreateInfo->arrayLayers == 0)
16856  {
16857  return VK_ERROR_VALIDATION_FAILED_EXT;
16858  }
16859 
16860  VMA_DEBUG_LOG("vmaCreateImage");
16861 
16862  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16863 
16864  *pImage = VK_NULL_HANDLE;
16865  *pAllocation = VK_NULL_HANDLE;
16866 
16867  // 1. Create VkImage.
16868  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16869  allocator->m_hDevice,
16870  pImageCreateInfo,
16871  allocator->GetAllocationCallbacks(),
16872  pImage);
16873  if(res >= 0)
16874  {
16875  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16876  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16877  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16878 
16879  // 2. Allocate memory using allocator.
16880  VkMemoryRequirements vkMemReq = {};
16881  bool requiresDedicatedAllocation = false;
16882  bool prefersDedicatedAllocation = false;
16883  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16884  requiresDedicatedAllocation, prefersDedicatedAllocation);
16885 
16886  res = allocator->AllocateMemory(
16887  vkMemReq,
16888  requiresDedicatedAllocation,
16889  prefersDedicatedAllocation,
16890  VK_NULL_HANDLE, // dedicatedBuffer
16891  *pImage, // dedicatedImage
16892  *pAllocationCreateInfo,
16893  suballocType,
16894  1, // allocationCount
16895  pAllocation);
16896 
16897 #if VMA_RECORDING_ENABLED
16898  if(allocator->GetRecorder() != VMA_NULL)
16899  {
16900  allocator->GetRecorder()->RecordCreateImage(
16901  allocator->GetCurrentFrameIndex(),
16902  *pImageCreateInfo,
16903  *pAllocationCreateInfo,
16904  *pAllocation);
16905  }
16906 #endif
16907 
16908  if(res >= 0)
16909  {
16910  // 3. Bind image with memory.
16911  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16912  {
16913  res = allocator->BindImageMemory(*pAllocation, *pImage);
16914  }
16915  if(res >= 0)
16916  {
16917  // All steps succeeded.
16918  #if VMA_STATS_STRING_ENABLED
16919  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16920  #endif
16921  if(pAllocationInfo != VMA_NULL)
16922  {
16923  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16924  }
16925 
16926  return VK_SUCCESS;
16927  }
16928  allocator->FreeMemory(
16929  1, // allocationCount
16930  pAllocation);
16931  *pAllocation = VK_NULL_HANDLE;
16932  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16933  *pImage = VK_NULL_HANDLE;
16934  return res;
16935  }
16936  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16937  *pImage = VK_NULL_HANDLE;
16938  return res;
16939  }
16940  return res;
16941 }
16942 
16943 void vmaDestroyImage(
16944  VmaAllocator allocator,
16945  VkImage image,
16946  VmaAllocation allocation)
16947 {
16948  VMA_ASSERT(allocator);
16949 
16950  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16951  {
16952  return;
16953  }
16954 
16955  VMA_DEBUG_LOG("vmaDestroyImage");
16956 
16957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16958 
16959 #if VMA_RECORDING_ENABLED
16960  if(allocator->GetRecorder() != VMA_NULL)
16961  {
16962  allocator->GetRecorder()->RecordDestroyImage(
16963  allocator->GetCurrentFrameIndex(),
16964  allocation);
16965  }
16966 #endif
16967 
16968  if(image != VK_NULL_HANDLE)
16969  {
16970  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16971  }
16972  if(allocation != VK_NULL_HANDLE)
16973  {
16974  allocator->FreeMemory(
16975  1, // allocationCount
16976  &allocation);
16977  }
16978 }
16979 
16980 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1753
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2053
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1811
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:2864
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1785
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2384
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1765
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2015
Definition: vk_mem_alloc.h:2119
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:2817
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1757
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2484
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1808
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2900
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2273
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1652
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2365
Definition: vk_mem_alloc.h:2090
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:2820
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1746
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2172
Definition: vk_mem_alloc.h:2042
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1820
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2301
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
Definition: vk_mem_alloc.h:1874
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1805
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2046
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1946
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1762
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:2854
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1945
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2904
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1837
VmaStatInfo total
Definition: vk_mem_alloc.h:1955
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2912
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2156
Definition: vk_mem_alloc.h:2114
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2895
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1763
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1688
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1814
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2315
Definition: vk_mem_alloc.h:2309
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1769
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1881
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2494
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1758
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1783
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2193
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2335
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2371
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1744
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2318
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2869
VmaMemoryUsage
Definition: vk_mem_alloc.h:1993
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2829
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2890
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:2908
Definition: vk_mem_alloc.h:2032
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2180
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1761
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1951
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1694
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2808
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2806
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2835
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1715
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1787
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1720
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2910
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2167
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2381
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1754
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1934
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2330
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1707
Definition: vk_mem_alloc.h:2305
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2097
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1947
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1711
Definition: vk_mem_alloc.h:2130
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2321
Definition: vk_mem_alloc.h:2041
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1760
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2162
Definition: vk_mem_alloc.h:2153
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1937
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1756
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2343
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1823
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2374
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2151
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2859
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2186
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1862
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1953
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2077
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1946
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1767
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1793
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:2805
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2883
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1709
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1766
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2357
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1759
Definition: vk_mem_alloc.h:2108
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1801
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2508
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:1817
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1946
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1943
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2362
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2814
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
Definition: vk_mem_alloc.h:2123
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2489
Definition: vk_mem_alloc.h:2137
Definition: vk_mem_alloc.h:2149
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2906
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1752
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1941
Definition: vk_mem_alloc.h:1998
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2311
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1790
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1939
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1764
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1768
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2064
Definition: vk_mem_alloc.h:2144
Definition: vk_mem_alloc.h:2025
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2503
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1742
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1755
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2290
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2470
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2134
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2255
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1947
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1777
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1954
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2368
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1947
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:2874
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2475
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:2838