Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

/// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    /// Enables flush after recording every function call.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    /// Flags for recording. Use #VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* pFilePath;
} VmaRecordSettings;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps. Optional.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of each Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is left at 1.
    const VmaVulkanFunctions* pVulkanFunctions;
    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

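/*
A minimal usage sketch (illustrative only; `physicalDevice` and `device` are
assumed to be valid, application-created Vulkan handles):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    if(res == VK_SUCCESS)
    {
        // ... create resources and allocations ...
        vmaDestroyAllocator(allocator);
    }
*/
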
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of #VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

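/*
A short sketch of dumping statistics as JSON (assumes a valid `allocator`);
the string returned by vmaBuildStatsString() must be released with
vmaFreeStatsString():

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write statsString to a log or file ...
    vmaFreeStatsString(allocator, statsString);
*/
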
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only; usually fastest for GPU access, not mappable on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on host; usually CPU-side staging memory.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on host and cached; usually used for readback from GPU to CPU.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own dedicated memory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create a new block.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use memory that will be persistently mapped and retrieve a pointer to it.
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /// Allocation created with this flag can become lost as a result of another allocation made with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating an allocation using this flag, other allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    /// Set this flag to treat VmaAllocationCreateInfo::pUserData as a null-terminated string to be copied.
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    /// Allocation will be created from the upper stack in a double stack pool.
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    /// Create both buffer/image and allocation, but don't bind them together.
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,

    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
    /// A bit mask to extract only STRATEGY bits from the entire set of flags.
    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use #VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in #VmaAllocation.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

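/*
A sketch of choosing a memory type for a staging buffer (assumes a valid
`allocator`; buffer size and usage are arbitrary example values). The resulting
index can then be used e.g. in VmaPoolCreateInfo::memoryTypeIndex:

    VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    stagingBufInfo.size = 65536;
    stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &stagingBufInfo, &allocCreateInfo, &memTypeIndex);
*/
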
/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    /// Enables alternative, linear allocation algorithm in this pool.
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    /// Enables alternative, buddy allocation algorithm in this pool.
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a #VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of #VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing #VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any #VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any #VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region available for a new allocation.
    VkDeviceSize unusedRangeSizeMax;
    /// Number of VkDeviceMemory blocks allocated for this pool.
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in given pool as lost if they are not used in the current frame or VmaPoolCreateInfo::frameInUseCount frames from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

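/*
A sketch of creating and using a custom pool (assumes `allocator` and a
`memTypeIndex` found as in the example above; the block size is an arbitrary
example value):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Direct allocations into the pool via VmaAllocationCreateInfo::pool,
    // then destroy it once all its allocations have been freed:
    vmaDestroyPool(allocator, pool);
*/
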
VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a #VmaAllocation object, that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data; null if not mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

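/*
A sketch of allocating memory for an existing buffer (assumes a valid
`allocator` and a VkBuffer `buf` created with vkCreateBuffer):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, &allocInfo);
    // Bind with vmaBindBufferMemory(allocator, alloc, buf) and release with
    // vmaFreeMemory(allocator, alloc) when done.
*/
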
/// Tries to resize an allocation in place, if there is enough free memory after it.
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

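/*
A sketch of a map/copy/unmap sequence (assumes `alloc` lives in a host-visible
memory type and that `srcData`/`srcDataSize` are application-side data):

    void* pData;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, alloc);
    }
    // For memory types without HOST_COHERENT, follow writes with
    // vmaFlushAllocation().
*/
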
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

/// Flags to be used in vmaDefragmentationBegin().
typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

/// Parameters for defragmentation. To be used with vmaDefragmentationBegin().
typedef struct VmaDefragmentationInfo2 {
    /// Reserved for future use. Should be 0.
    VmaDefragmentationFlags flags;
    /// Number of allocations in pAllocations array.
    uint32_t allocationCount;
    /// Pointer to array of allocations that can be defragmented.
    VmaAllocation* pAllocations;
    /// Optional, output. Filled with information whether the allocation at the same index has been changed during defragmentation.
    VkBool32* pAllocationsChanged;
    /// Number of pools in pPools array.
    uint32_t poolCount;
    /// Either null or pointer to array of pools to be defragmented.
    VmaPool* pPools;
    /// Maximum total number of bytes that can be copied on CPU side while moving allocations.
    VkDeviceSize maxCpuBytesToMove;
    /// Maximum number of allocations that can be moved on CPU side.
    uint32_t maxCpuAllocationsToMove;
    /// Maximum total number of bytes that can be copied on GPU side, posted to commandBuffer.
    VkDeviceSize maxGpuBytesToMove;
    /// Maximum number of allocations that can be moved on GPU side, posted to commandBuffer.
    uint32_t maxGpuAllocationsToMove;
    /// Optional. Command buffer where GPU copy commands will be posted.
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

/// Deprecated. Optional configuration parameters to be passed to vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by defragmentation functions.
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations.
    VkDeviceSize bytesMoved;
    /// Total number of bytes freed by releasing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext);

VkResult vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

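/*
A CPU-side defragmentation sketch (assumes `allocs` is an array of `allocCount`
allocations whose buffers/images have been destroyed beforehand, so the memory
is safe to move; the resources must be recreated and rebound afterwards):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/
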
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

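/*
The typical combined create-buffer-and-allocate path (assumes a valid
`allocator`; size and usage are example values):

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
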
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

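// To compile the implementation, exactly one translation unit in the program
// should define VMA_IMPLEMENTATION before including this file:
//
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
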
#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used by the statistics-string helpers below
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though std::shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
Library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
#include <malloc.h> // for memalign
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures such as operator[].
// Making it non-empty can make the program noticeably slower.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    /// Define to 1 to make every allocation use its own, dedicated VkDeviceMemory block.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    /// Minimum alignment of all allocations, in bytes.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /// Minimum margin before and after every allocation, in bytes.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    /// Define to 1 to fill allocations with a bit pattern on creation and destruction.
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    /// Define to 1 (together with a nonzero VMA_DEBUG_MARGIN) to write and validate
    /// a magic value in the margins, enabling corruption detection.
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /// Define to 1 to synchronize entry calls to the library with a single global mutex.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /// Minimum value used for VkPhysicalDeviceLimits::bufferImageGranularity.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

3511 
3512 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
3513 // Use types like uint32_t, uint64_t as T.
3514 template <typename T>
3515 static inline T VmaAlignUp(T val, T align)
3516 {
3517  return (val + align - 1) / align * align;
3518 }
3519 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
3520 // Use types like uint32_t, uint64_t as T.
3521 template <typename T>
3522 static inline T VmaAlignDown(T val, T align)
3523 {
3524  return val / align * align;
3525 }
3526 
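// For instance, these helpers can clamp a flush range to nonCoherentAtomSize
// boundaries (a sketch with an example atom size of 64):
//
//     VkDeviceSize begin = VmaAlignDown<VkDeviceSize>(offset, 64);      // 100 -> 64
//     VkDeviceSize end   = VmaAlignUp<VkDeviceSize>(offset + size, 64); // 200 -> 256
//
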
// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and the other one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

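/*
A usage sketch with a plain sorted array (example data; the comparator is a
std::less-style functor):

    struct Less { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
    const uint32_t sorted[] = { 2, 4, 8, 16 };
    // Returns a pointer to 8: the first element not less than 5.
    const uint32_t* it = VmaBinaryFindFirstNotLess(sorted, sorted + 4, 5u, Less());
*/
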
/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            // Pass the allocation callbacks, not the allocator object, as in resize() below.
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

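/*
A usage sketch (assumes a trivially-constructible struct Node; passing null
callbacks falls back to VMA_SYSTEM_ALIGNED_MALLOC):

    VmaPoolAllocator<Node> nodeAllocator(VMA_NULL, 32); // First block holds 32 items.
    Node* n = nodeAllocator.Alloc(); // O(1) unless a new block must be created.
    nodeAllocator.Free(n);           // Returns the item to its block's free list.
*/
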
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

4669 template<typename T, typename AllocatorT>
4670 class VmaList
4671 {
4672  VMA_CLASS_NO_COPY(VmaList)
4673 public:
4674  class iterator
4675  {
4676  public:
4677  iterator() :
4678  m_pList(VMA_NULL),
4679  m_pItem(VMA_NULL)
4680  {
4681  }
4682 
4683  T& operator*() const
4684  {
4685  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4686  return m_pItem->Value;
4687  }
4688  T* operator->() const
4689  {
4690  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4691  return &m_pItem->Value;
4692  }
4693 
4694  iterator& operator++()
4695  {
4696  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4697  m_pItem = m_pItem->pNext;
4698  return *this;
4699  }
4700  iterator& operator--()
4701  {
4702  if(m_pItem != VMA_NULL)
4703  {
4704  m_pItem = m_pItem->pPrev;
4705  }
4706  else
4707  {
4708  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4709  m_pItem = m_pList->Back();
4710  }
4711  return *this;
4712  }
4713 
4714  iterator operator++(int)
4715  {
4716  iterator result = *this;
4717  ++*this;
4718  return result;
4719  }
4720  iterator operator--(int)
4721  {
4722  iterator result = *this;
4723  --*this;
4724  return result;
4725  }
4726 
4727  bool operator==(const iterator& rhs) const
4728  {
4729  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4730  return m_pItem == rhs.m_pItem;
4731  }
4732  bool operator!=(const iterator& rhs) const
4733  {
4734  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4735  return m_pItem != rhs.m_pItem;
4736  }
4737 
4738  private:
4739  VmaRawList<T>* m_pList;
4740  VmaListItem<T>* m_pItem;
4741 
4742  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4743  m_pList(pList),
4744  m_pItem(pItem)
4745  {
4746  }
4747 
4748  friend class VmaList<T, AllocatorT>;
4749  };
4750 
4751  class const_iterator
4752  {
4753  public:
4754  const_iterator() :
4755  m_pList(VMA_NULL),
4756  m_pItem(VMA_NULL)
4757  {
4758  }
4759 
4760  const_iterator(const iterator& src) :
4761  m_pList(src.m_pList),
4762  m_pItem(src.m_pItem)
4763  {
4764  }
4765 
4766  const T& operator*() const
4767  {
4768  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4769  return m_pItem->Value;
4770  }
4771  const T* operator->() const
4772  {
4773  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4774  return &m_pItem->Value;
4775  }
4776 
4777  const_iterator& operator++()
4778  {
4779  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4780  m_pItem = m_pItem->pNext;
4781  return *this;
4782  }
4783  const_iterator& operator--()
4784  {
4785  if(m_pItem != VMA_NULL)
4786  {
4787  m_pItem = m_pItem->pPrev;
4788  }
4789  else
4790  {
4791  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4792  m_pItem = m_pList->Back();
4793  }
4794  return *this;
4795  }
4796 
4797  const_iterator operator++(int)
4798  {
4799  const_iterator result = *this;
4800  ++*this;
4801  return result;
4802  }
4803  const_iterator operator--(int)
4804  {
4805  const_iterator result = *this;
4806  --*this;
4807  return result;
4808  }
4809 
4810  bool operator==(const const_iterator& rhs) const
4811  {
4812  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4813  return m_pItem == rhs.m_pItem;
4814  }
4815  bool operator!=(const const_iterator& rhs) const
4816  {
4817  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4818  return m_pItem != rhs.m_pItem;
4819  }
4820 
4821  private:
4822  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4823  m_pList(pList),
4824  m_pItem(pItem)
4825  {
4826  }
4827 
4828  const VmaRawList<T>* m_pList;
4829  const VmaListItem<T>* m_pItem;
4830 
4831  friend class VmaList<T, AllocatorT>;
4832  };
4833 
4834  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4835 
4836  bool empty() const { return m_RawList.IsEmpty(); }
4837  size_t size() const { return m_RawList.GetCount(); }
4838 
4839  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4840  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4841 
4842  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4843  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4844 
4845  void clear() { m_RawList.Clear(); }
4846  void push_back(const T& value) { m_RawList.PushBack(value); }
4847  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4848  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4849 
4850 private:
4851  VmaRawList<T> m_RawList;
4852 };
4853 
4854 #endif // #if VMA_USE_STL_LIST
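// [Illustrative sketch added by the editor; not part of the original file.]
// VmaList mirrors a subset of std::list on top of VmaRawList, with nodes coming
// from a VmaPoolAllocator. A minimal usage sketch, assuming default (null)
// VkAllocationCallbacks:
#if 0
static void VmaListUsageSketch()
{
    VmaStlAllocator<int> alloc(VMA_NULL); // VMA_NULL -> default CPU allocation callbacks.
    VmaList<int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.push_back(3);
    VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin();
    ++it;                     // Points at 3.
    list.insert(it, 2);       // Inserts before 'it', like std::list: 1, 2, 3.
    list.erase(list.begin()); // 2, 3.
}
#endif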
4855 
4856 ////////////////////////////////////////////////////////////////////////////////
4857 // class VmaMap
4858 
4859 // Unused in this version.
4860 #if 0
4861 
4862 #if VMA_USE_STL_UNORDERED_MAP
4863 
4864 #define VmaPair std::pair
4865 
4866 #define VMA_MAP_TYPE(KeyT, ValueT) \
4867  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4868 
4869 #else // #if VMA_USE_STL_UNORDERED_MAP
4870 
4871 template<typename T1, typename T2>
4872 struct VmaPair
4873 {
4874  T1 first;
4875  T2 second;
4876 
4877  VmaPair() : first(), second() { }
4878  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4879 };
4880 
4881 /* Class compatible with a subset of the interface of std::unordered_map.
4882 KeyT, ValueT must be POD because they will be stored in VmaVector.
4883 */
4884 template<typename KeyT, typename ValueT>
4885 class VmaMap
4886 {
4887 public:
4888  typedef VmaPair<KeyT, ValueT> PairType;
4889  typedef PairType* iterator;
4890 
4891  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4892 
4893  iterator begin() { return m_Vector.begin(); }
4894  iterator end() { return m_Vector.end(); }
4895 
4896  void insert(const PairType& pair);
4897  iterator find(const KeyT& key);
4898  void erase(iterator it);
4899 
4900 private:
4901  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4902 };
4903 
4904 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4905 
4906 template<typename FirstT, typename SecondT>
4907 struct VmaPairFirstLess
4908 {
4909  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4910  {
4911  return lhs.first < rhs.first;
4912  }
4913  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4914  {
4915  return lhs.first < rhsFirst;
4916  }
4917 };
4918 
4919 template<typename KeyT, typename ValueT>
4920 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4921 {
4922  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4923  m_Vector.data(),
4924  m_Vector.data() + m_Vector.size(),
4925  pair,
4926  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4927  VmaVectorInsert(m_Vector, indexToInsert, pair);
4928 }
4929 
4930 template<typename KeyT, typename ValueT>
4931 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4932 {
4933  PairType* it = VmaBinaryFindFirstNotLess(
4934  m_Vector.data(),
4935  m_Vector.data() + m_Vector.size(),
4936  key,
4937  VmaPairFirstLess<KeyT, ValueT>());
4938  if((it != m_Vector.end()) && (it->first == key))
4939  {
4940  return it;
4941  }
4942  else
4943  {
4944  return m_Vector.end();
4945  }
4946 }
4947 
4948 template<typename KeyT, typename ValueT>
4949 void VmaMap<KeyT, ValueT>::erase(iterator it)
4950 {
4951  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4952 }
4953 
4954 #endif // #if VMA_USE_STL_UNORDERED_MAP
4955 
4956 #endif // #if 0
4957 
4958 ////////////////////////////////////////////////////////////////////////////////
4959 
4960 class VmaDeviceMemoryBlock;
4961 
4962 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4963 
4964 struct VmaAllocation_T
4965 {
4966 private:
4967  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4968 
4969  enum FLAGS
4970  {
4971  FLAG_USER_DATA_STRING = 0x01,
4972  };
4973 
4974 public:
4975  enum ALLOCATION_TYPE
4976  {
4977  ALLOCATION_TYPE_NONE,
4978  ALLOCATION_TYPE_BLOCK,
4979  ALLOCATION_TYPE_DEDICATED,
4980  };
4981 
4982  /*
4983  This struct cannot have a constructor or a destructor. It must be POD because it
4984  is allocated using VmaPoolAllocator.
4985  */
4986 
4987  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4988  {
4989  m_Alignment = 1;
4990  m_Size = 0;
4991  m_pUserData = VMA_NULL;
4992  m_LastUseFrameIndex = currentFrameIndex;
4993  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4994  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4995  m_MapCount = 0;
4996  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4997 
4998 #if VMA_STATS_STRING_ENABLED
4999  m_CreationFrameIndex = currentFrameIndex;
5000  m_BufferImageUsage = 0;
5001 #endif
5002  }
5003 
5004  void Dtor()
5005  {
5006  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5007 
5008  // Check if owned string was freed.
5009  VMA_ASSERT(m_pUserData == VMA_NULL);
5010  }
5011 
5012  void InitBlockAllocation(
5013  VmaDeviceMemoryBlock* block,
5014  VkDeviceSize offset,
5015  VkDeviceSize alignment,
5016  VkDeviceSize size,
5017  VmaSuballocationType suballocationType,
5018  bool mapped,
5019  bool canBecomeLost)
5020  {
5021  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5022  VMA_ASSERT(block != VMA_NULL);
5023  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5024  m_Alignment = alignment;
5025  m_Size = size;
5026  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5027  m_SuballocationType = (uint8_t)suballocationType;
5028  m_BlockAllocation.m_Block = block;
5029  m_BlockAllocation.m_Offset = offset;
5030  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5031  }
5032 
5033  void InitLost()
5034  {
5035  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5036  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5037  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5038  m_BlockAllocation.m_Block = VMA_NULL;
5039  m_BlockAllocation.m_Offset = 0;
5040  m_BlockAllocation.m_CanBecomeLost = true;
5041  }
5042 
5043  void ChangeBlockAllocation(
5044  VmaAllocator hAllocator,
5045  VmaDeviceMemoryBlock* block,
5046  VkDeviceSize offset);
5047 
5048  void ChangeSize(VkDeviceSize newSize);
5049  void ChangeOffset(VkDeviceSize newOffset);
5050 
5051  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5052  void InitDedicatedAllocation(
5053  uint32_t memoryTypeIndex,
5054  VkDeviceMemory hMemory,
5055  VmaSuballocationType suballocationType,
5056  void* pMappedData,
5057  VkDeviceSize size)
5058  {
5059  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5060  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5061  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5062  m_Alignment = 0;
5063  m_Size = size;
5064  m_SuballocationType = (uint8_t)suballocationType;
5065  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5066  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5067  m_DedicatedAllocation.m_hMemory = hMemory;
5068  m_DedicatedAllocation.m_pMappedData = pMappedData;
5069  }
5070 
5071  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5072  VkDeviceSize GetAlignment() const { return m_Alignment; }
5073  VkDeviceSize GetSize() const { return m_Size; }
5074  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5075  void* GetUserData() const { return m_pUserData; }
5076  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5077  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5078 
5079  VmaDeviceMemoryBlock* GetBlock() const
5080  {
5081  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5082  return m_BlockAllocation.m_Block;
5083  }
5084  VkDeviceSize GetOffset() const;
5085  VkDeviceMemory GetMemory() const;
5086  uint32_t GetMemoryTypeIndex() const;
5087  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5088  void* GetMappedData() const;
5089  bool CanBecomeLost() const;
5090 
5091  uint32_t GetLastUseFrameIndex() const
5092  {
5093  return m_LastUseFrameIndex.load();
5094  }
5095  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5096  {
5097  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5098  }
5099  /*
5100  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5101  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5102  - Else, returns false.
5103 
5104  Asserts if hAllocation is already lost - you should not call it then.
5105  Asserts if hAllocation was not created with CAN_BECOME_LOST_BIT.
5106  */
5107  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5108 
5109  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5110  {
5111  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5112  outInfo.blockCount = 1;
5113  outInfo.allocationCount = 1;
5114  outInfo.unusedRangeCount = 0;
5115  outInfo.usedBytes = m_Size;
5116  outInfo.unusedBytes = 0;
5117  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5118  outInfo.unusedRangeSizeMin = UINT64_MAX;
5119  outInfo.unusedRangeSizeMax = 0;
5120  }
5121 
5122  void BlockAllocMap();
5123  void BlockAllocUnmap();
5124  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5125  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5126 
5127 #if VMA_STATS_STRING_ENABLED
5128  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5129  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5130 
5131  void InitBufferImageUsage(uint32_t bufferImageUsage)
5132  {
5133  VMA_ASSERT(m_BufferImageUsage == 0);
5134  m_BufferImageUsage = bufferImageUsage;
5135  }
5136 
5137  void PrintParameters(class VmaJsonWriter& json) const;
5138 #endif
5139 
5140 private:
5141  VkDeviceSize m_Alignment;
5142  VkDeviceSize m_Size;
5143  void* m_pUserData;
5144  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5145  uint8_t m_Type; // ALLOCATION_TYPE
5146  uint8_t m_SuballocationType; // VmaSuballocationType
5147  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5148  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory().
5149  uint8_t m_MapCount;
5150  uint8_t m_Flags; // enum FLAGS
5151 
5152  // Allocation out of VmaDeviceMemoryBlock.
5153  struct BlockAllocation
5154  {
5155  VmaDeviceMemoryBlock* m_Block;
5156  VkDeviceSize m_Offset;
5157  bool m_CanBecomeLost;
5158  };
5159 
5160  // Allocation for an object that has its own private VkDeviceMemory.
5161  struct DedicatedAllocation
5162  {
5163  uint32_t m_MemoryTypeIndex;
5164  VkDeviceMemory m_hMemory;
5165  void* m_pMappedData; // Not null means memory is mapped.
5166  };
5167 
5168  union
5169  {
5170  // Allocation out of VmaDeviceMemoryBlock.
5171  BlockAllocation m_BlockAllocation;
5172  // Allocation for an object that has its own private VkDeviceMemory.
5173  DedicatedAllocation m_DedicatedAllocation;
5174  };
5175 
5176 #if VMA_STATS_STRING_ENABLED
5177  uint32_t m_CreationFrameIndex;
5178  uint32_t m_BufferImageUsage; // 0 if unknown.
5179 #endif
5180 
5181  void FreeUserDataString(VmaAllocator hAllocator);
5182 };
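// [Illustrative note added by the editor; not part of the original file.]
// How VmaAllocation_T::m_MapCount packs its state: bit 0x80 marks a persistently
// mapped allocation (VMA_ALLOCATION_CREATE_MAPPED_BIT), while the low 7 bits count
// outstanding vmaMapMemory() calls. A worked example:
#if 0
uint8_t mapCount = 0x80 | 2; // Persistently mapped + 2 explicit vmaMapMemory() calls.
bool persistent = (mapCount & 0x80) != 0; // true - stays mapped even at refcount 0.
uint8_t explicitMaps = mapCount & 0x7F;   // 2 - vmaUnmapMemory() must be called twice.
#endif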
5183 
5184 /*
5185 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5186 an allocated memory block, or free.
5187 */
5188 struct VmaSuballocation
5189 {
5190  VkDeviceSize offset;
5191  VkDeviceSize size;
5192  VmaAllocation hAllocation;
5193  VmaSuballocationType type;
5194 };
5195 
5196 // Comparator for offsets.
5197 struct VmaSuballocationOffsetLess
5198 {
5199  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5200  {
5201  return lhs.offset < rhs.offset;
5202  }
5203 };
5204 struct VmaSuballocationOffsetGreater
5205 {
5206  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5207  {
5208  return lhs.offset > rhs.offset;
5209  }
5210 };
5211 
5212 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5213 
5214 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5215 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5216 
5217 enum class VmaAllocationRequestType
5218 {
5219  Normal,
5220  // Used by "Linear" algorithm.
5221  UpperAddress,
5222  EndOf1st,
5223  EndOf2nd,
5224 };
5225 
5226 /*
5227 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5228 
5229 If canMakeOtherLost was false:
5230 - item points to a FREE suballocation.
5231 - itemsToMakeLostCount is 0.
5232 
5233 If canMakeOtherLost was true:
5234 - item points to first of sequence of suballocations, which are either FREE,
5235  or point to VmaAllocations that can become lost.
5236 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5237  the requested allocation to succeed.
5238 */
5239 struct VmaAllocationRequest
5240 {
5241  VkDeviceSize offset;
5242  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5243  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5244  VmaSuballocationList::iterator item;
5245  size_t itemsToMakeLostCount;
5246  void* customData;
5247  VmaAllocationRequestType type;
5248 
5249  VkDeviceSize CalcCost() const
5250  {
5251  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5252  }
5253 };
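// [Illustrative note added by the editor; not part of the original file.]
// CalcCost() lets the allocator rank candidate requests when canMakeOtherLost is
// used: each allocation that would be sacrificed is charged VMA_LOST_ALLOCATION_COST
// (1 MiB) on top of the bytes it occupies. For example, a request overlapping
// 262144 bytes of live allocations spread over 2 victims costs
// 262144 + 2 * 1048576 = 2359296, so it loses to any cheaper placement.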
5254 
5255 /*
5256 Data structure used for bookkeeping of allocations and unused ranges of memory
5257 in a single VkDeviceMemory block.
5258 */
5259 class VmaBlockMetadata
5260 {
5261 public:
5262  VmaBlockMetadata(VmaAllocator hAllocator);
5263  virtual ~VmaBlockMetadata() { }
5264  virtual void Init(VkDeviceSize size) { m_Size = size; }
5265 
5266  // Validates all data structures inside this object. If not valid, returns false.
5267  virtual bool Validate() const = 0;
5268  VkDeviceSize GetSize() const { return m_Size; }
5269  virtual size_t GetAllocationCount() const = 0;
5270  virtual VkDeviceSize GetSumFreeSize() const = 0;
5271  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5272  // Returns true if this block is empty - contains only a single free suballocation.
5273  virtual bool IsEmpty() const = 0;
5274 
5275  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5276  // Shouldn't modify inoutStats.blockCount.
5277  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5278 
5279 #if VMA_STATS_STRING_ENABLED
5280  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5281 #endif
5282 
5283  // Tries to find a place for suballocation with given parameters inside this block.
5284  // If succeeded, fills pAllocationRequest and returns true.
5285  // If failed, returns false.
5286  virtual bool CreateAllocationRequest(
5287  uint32_t currentFrameIndex,
5288  uint32_t frameInUseCount,
5289  VkDeviceSize bufferImageGranularity,
5290  VkDeviceSize allocSize,
5291  VkDeviceSize allocAlignment,
5292  bool upperAddress,
5293  VmaSuballocationType allocType,
5294  bool canMakeOtherLost,
5295  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5296  uint32_t strategy,
5297  VmaAllocationRequest* pAllocationRequest) = 0;
5298 
5299  virtual bool MakeRequestedAllocationsLost(
5300  uint32_t currentFrameIndex,
5301  uint32_t frameInUseCount,
5302  VmaAllocationRequest* pAllocationRequest) = 0;
5303 
5304  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5305 
5306  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5307 
5308  // Makes actual allocation based on request. Request must already be checked and valid.
5309  virtual void Alloc(
5310  const VmaAllocationRequest& request,
5311  VmaSuballocationType type,
5312  VkDeviceSize allocSize,
5313  VmaAllocation hAllocation) = 0;
5314 
5315  // Frees suballocation assigned to given memory region.
5316  virtual void Free(const VmaAllocation allocation) = 0;
5317  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5318 
5319  // Tries to resize (grow or shrink) space for given allocation, in place.
5320  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5321 
5322 protected:
5323  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5324 
5325 #if VMA_STATS_STRING_ENABLED
5326  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5327  VkDeviceSize unusedBytes,
5328  size_t allocationCount,
5329  size_t unusedRangeCount) const;
5330  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5331  VkDeviceSize offset,
5332  VmaAllocation hAllocation) const;
5333  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5334  VkDeviceSize offset,
5335  VkDeviceSize size) const;
5336  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5337 #endif
5338 
5339 private:
5340  VkDeviceSize m_Size;
5341  const VkAllocationCallbacks* m_pAllocationCallbacks;
5342 };
5343 
5344 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5345  VMA_ASSERT(0 && "Validation failed: " #cond); \
5346  return false; \
5347  } } while(false)
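// [Illustrative sketch added by the editor; not part of the original file.]
// VMA_VALIDATE is meant for the Validate() implementations below: on failure it
// asserts (in debug builds) and makes the enclosing function return false, so a
// validation routine can be written as a flat list of checks:
#if 0
static bool ValidateRangeSketch(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize blockSize)
{
    VMA_VALIDATE(size > 0);
    VMA_VALIDATE(offset + size <= blockSize); // Fails -> assert + "return false".
    return true;
}
#endif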
5348 
5349 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5350 {
5351  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5352 public:
5353  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5354  virtual ~VmaBlockMetadata_Generic();
5355  virtual void Init(VkDeviceSize size);
5356 
5357  virtual bool Validate() const;
5358  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5359  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5360  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5361  virtual bool IsEmpty() const;
5362 
5363  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5364  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5365 
5366 #if VMA_STATS_STRING_ENABLED
5367  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5368 #endif
5369 
5370  virtual bool CreateAllocationRequest(
5371  uint32_t currentFrameIndex,
5372  uint32_t frameInUseCount,
5373  VkDeviceSize bufferImageGranularity,
5374  VkDeviceSize allocSize,
5375  VkDeviceSize allocAlignment,
5376  bool upperAddress,
5377  VmaSuballocationType allocType,
5378  bool canMakeOtherLost,
5379  uint32_t strategy,
5380  VmaAllocationRequest* pAllocationRequest);
5381 
5382  virtual bool MakeRequestedAllocationsLost(
5383  uint32_t currentFrameIndex,
5384  uint32_t frameInUseCount,
5385  VmaAllocationRequest* pAllocationRequest);
5386 
5387  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5388 
5389  virtual VkResult CheckCorruption(const void* pBlockData);
5390 
5391  virtual void Alloc(
5392  const VmaAllocationRequest& request,
5393  VmaSuballocationType type,
5394  VkDeviceSize allocSize,
5395  VmaAllocation hAllocation);
5396 
5397  virtual void Free(const VmaAllocation allocation);
5398  virtual void FreeAtOffset(VkDeviceSize offset);
5399 
5400  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5401 
5402  ////////////////////////////////////////////////////////////////////////////////
5403  // For defragmentation
5404 
5405  bool IsBufferImageGranularityConflictPossible(
5406  VkDeviceSize bufferImageGranularity,
5407  VmaSuballocationType& inOutPrevSuballocType) const;
5408 
5409 private:
5410  friend class VmaDefragmentationAlgorithm_Generic;
5411  friend class VmaDefragmentationAlgorithm_Fast;
5412 
5413  uint32_t m_FreeCount;
5414  VkDeviceSize m_SumFreeSize;
5415  VmaSuballocationList m_Suballocations;
5416  // Suballocations that are free and have size greater than a certain threshold.
5417  // Sorted by size, ascending.
5418  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5419 
5420  bool ValidateFreeSuballocationList() const;
5421 
5422  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
5423  // If yes, fills pOffset and returns true. If no, returns false.
5424  bool CheckAllocation(
5425  uint32_t currentFrameIndex,
5426  uint32_t frameInUseCount,
5427  VkDeviceSize bufferImageGranularity,
5428  VkDeviceSize allocSize,
5429  VkDeviceSize allocAlignment,
5430  VmaSuballocationType allocType,
5431  VmaSuballocationList::const_iterator suballocItem,
5432  bool canMakeOtherLost,
5433  VkDeviceSize* pOffset,
5434  size_t* itemsToMakeLostCount,
5435  VkDeviceSize* pSumFreeSize,
5436  VkDeviceSize* pSumItemSize) const;
5437  // Given a free suballocation, merges it with the following one, which must also be free.
5438  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5439  // Releases given suballocation, making it free.
5440  // Merges it with adjacent free suballocations if applicable.
5441  // Returns iterator to new free suballocation at this place.
5442  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5443  // Given a free suballocation, inserts it into the sorted list
5444  // m_FreeSuballocationsBySize if it is large enough to qualify.
5445  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5446  // Given a free suballocation, removes it from the sorted list
5447  // m_FreeSuballocationsBySize if it was registered there.
5448  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5449 };
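// [Illustrative sketch added by the editor; not part of the original file.]
// Because m_FreeSuballocationsBySize is kept sorted by size ascending, a best-fit
// lookup is a binary search for the first free range not smaller than the request.
// A sketch of the same idea using std::lower_bound (assuming <algorithm>/<vector>):
#if 0
#include <algorithm>
#include <vector>
static size_t FindBestFitSketch(const std::vector<VkDeviceSize>& freeSizesAscending, VkDeviceSize allocSize)
{
    std::vector<VkDeviceSize>::const_iterator it =
        std::lower_bound(freeSizesAscending.begin(), freeSizesAscending.end(), allocSize);
    // First element >= allocSize is the tightest fit; SIZE_MAX means nothing fits.
    return it != freeSizesAscending.end() ? (size_t)(it - freeSizesAscending.begin()) : SIZE_MAX;
}
#endif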
5450 
5451 /*
5452 Allocations and their references in the internal data structure look like this:
5453 
5454 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5455 
5456  0 +-------+
5457  | |
5458  | |
5459  | |
5460  +-------+
5461  | Alloc | 1st[m_1stNullItemsBeginCount]
5462  +-------+
5463  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5464  +-------+
5465  | ... |
5466  +-------+
5467  | Alloc | 1st[1st.size() - 1]
5468  +-------+
5469  | |
5470  | |
5471  | |
5472 GetSize() +-------+
5473 
5474 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5475 
5476  0 +-------+
5477  | Alloc | 2nd[0]
5478  +-------+
5479  | Alloc | 2nd[1]
5480  +-------+
5481  | ... |
5482  +-------+
5483  | Alloc | 2nd[2nd.size() - 1]
5484  +-------+
5485  | |
5486  | |
5487  | |
5488  +-------+
5489  | Alloc | 1st[m_1stNullItemsBeginCount]
5490  +-------+
5491  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5492  +-------+
5493  | ... |
5494  +-------+
5495  | Alloc | 1st[1st.size() - 1]
5496  +-------+
5497  | |
5498 GetSize() +-------+
5499 
5500 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5501 
5502  0 +-------+
5503  | |
5504  | |
5505  | |
5506  +-------+
5507  | Alloc | 1st[m_1stNullItemsBeginCount]
5508  +-------+
5509  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5510  +-------+
5511  | ... |
5512  +-------+
5513  | Alloc | 1st[1st.size() - 1]
5514  +-------+
5515  | |
5516  | |
5517  | |
5518  +-------+
5519  | Alloc | 2nd[2nd.size() - 1]
5520  +-------+
5521  | ... |
5522  +-------+
5523  | Alloc | 2nd[1]
5524  +-------+
5525  | Alloc | 2nd[0]
5526 GetSize() +-------+
5527 
5528 */
5529 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5530 {
5531  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5532 public:
5533  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5534  virtual ~VmaBlockMetadata_Linear();
5535  virtual void Init(VkDeviceSize size);
5536 
5537  virtual bool Validate() const;
5538  virtual size_t GetAllocationCount() const;
5539  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5540  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5541  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5542 
5543  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5544  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5545 
5546 #if VMA_STATS_STRING_ENABLED
5547  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5548 #endif
5549 
5550  virtual bool CreateAllocationRequest(
5551  uint32_t currentFrameIndex,
5552  uint32_t frameInUseCount,
5553  VkDeviceSize bufferImageGranularity,
5554  VkDeviceSize allocSize,
5555  VkDeviceSize allocAlignment,
5556  bool upperAddress,
5557  VmaSuballocationType allocType,
5558  bool canMakeOtherLost,
5559  uint32_t strategy,
5560  VmaAllocationRequest* pAllocationRequest);
5561 
5562  virtual bool MakeRequestedAllocationsLost(
5563  uint32_t currentFrameIndex,
5564  uint32_t frameInUseCount,
5565  VmaAllocationRequest* pAllocationRequest);
5566 
5567  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5568 
5569  virtual VkResult CheckCorruption(const void* pBlockData);
5570 
5571  virtual void Alloc(
5572  const VmaAllocationRequest& request,
5573  VmaSuballocationType type,
5574  VkDeviceSize allocSize,
5575  VmaAllocation hAllocation);
5576 
5577  virtual void Free(const VmaAllocation allocation);
5578  virtual void FreeAtOffset(VkDeviceSize offset);
5579 
5580 private:
5581  /*
5582  There are two suballocation vectors, used in a ping-pong manner.
5583  The one with index m_1stVectorIndex is called 1st.
5584  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5585  2nd can be non-empty only when 1st is not empty.
5586  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5587  */
5588  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5589 
5590  enum SECOND_VECTOR_MODE
5591  {
5592  SECOND_VECTOR_EMPTY,
5593  /*
5594  Suballocations in 2nd vector are created later than the ones in 1st, but they
5595  all have smaller offsets.
5596  */
5597  SECOND_VECTOR_RING_BUFFER,
5598  /*
5599  Suballocations in 2nd vector are upper side of double stack.
5600  They all have offsets higher than those in 1st vector.
5601  Top of this stack means smaller offsets, but higher indices in this vector.
5602  */
5603  SECOND_VECTOR_DOUBLE_STACK,
5604  };
5605 
5606  VkDeviceSize m_SumFreeSize;
5607  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5608  uint32_t m_1stVectorIndex;
5609  SECOND_VECTOR_MODE m_2ndVectorMode;
5610 
5611  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5612  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5613  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5614  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5615 
5616  // Number of items in 1st vector with hAllocation = null at the beginning.
5617  size_t m_1stNullItemsBeginCount;
5618  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5619  size_t m_1stNullItemsMiddleCount;
5620  // Number of items in 2nd vector with hAllocation = null.
5621  size_t m_2ndNullItemsCount;
5622 
5623  bool ShouldCompact1st() const;
5624  void CleanupAfterFree();
5625 
5626  bool CreateAllocationRequest_LowerAddress(
5627  uint32_t currentFrameIndex,
5628  uint32_t frameInUseCount,
5629  VkDeviceSize bufferImageGranularity,
5630  VkDeviceSize allocSize,
5631  VkDeviceSize allocAlignment,
5632  VmaSuballocationType allocType,
5633  bool canMakeOtherLost,
5634  uint32_t strategy,
5635  VmaAllocationRequest* pAllocationRequest);
5636  bool CreateAllocationRequest_UpperAddress(
5637  uint32_t currentFrameIndex,
5638  uint32_t frameInUseCount,
5639  VkDeviceSize bufferImageGranularity,
5640  VkDeviceSize allocSize,
5641  VkDeviceSize allocAlignment,
5642  VmaSuballocationType allocType,
5643  bool canMakeOtherLost,
5644  uint32_t strategy,
5645  VmaAllocationRequest* pAllocationRequest);
5646 };
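// [Illustrative sketch added by the editor; not part of the original file.]
// The 1st/2nd roles of m_Suballocations0/m_Suballocations1 are swapped by flipping
// m_1stVectorIndex instead of copying elements. The same O(1) ping-pong pattern in
// isolation (assuming <vector>):
#if 0
#include <vector>
struct PingPongSketch
{
    std::vector<int> m_V0, m_V1;
    uint32_t m_1stIndex; // 0 or 1.
    PingPongSketch() : m_1stIndex(0) { }
    std::vector<int>& First()  { return m_1stIndex ? m_V1 : m_V0; }
    std::vector<int>& Second() { return m_1stIndex ? m_V0 : m_V1; }
    void SwapRoles() { m_1stIndex ^= 1; } // No elements are moved.
};
#endif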
5647 
5648 /*
5649 - GetSize() is the original size of allocated memory block.
5650 - m_UsableSize is this size aligned down to a power of two.
5651  All allocations and calculations happen relative to m_UsableSize.
5652 - GetUnusableSize() is the difference between them.
5653  It is reported as a separate, unused range, not available for allocations.
5654 
5655 Node at level 0 has size = m_UsableSize.
5656 Each subsequent level contains nodes half the size of those on the previous level.
5657 m_LevelCount is the maximum number of levels to use in the current object.
5658 */
5659 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5660 {
5661  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5662 public:
5663  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5664  virtual ~VmaBlockMetadata_Buddy();
5665  virtual void Init(VkDeviceSize size);
5666 
5667  virtual bool Validate() const;
5668  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5669  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5670  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5671  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5672 
5673  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5674  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5675 
5676 #if VMA_STATS_STRING_ENABLED
5677  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5678 #endif
5679 
5680  virtual bool CreateAllocationRequest(
5681  uint32_t currentFrameIndex,
5682  uint32_t frameInUseCount,
5683  VkDeviceSize bufferImageGranularity,
5684  VkDeviceSize allocSize,
5685  VkDeviceSize allocAlignment,
5686  bool upperAddress,
5687  VmaSuballocationType allocType,
5688  bool canMakeOtherLost,
5689  uint32_t strategy,
5690  VmaAllocationRequest* pAllocationRequest);
5691 
5692  virtual bool MakeRequestedAllocationsLost(
5693  uint32_t currentFrameIndex,
5694  uint32_t frameInUseCount,
5695  VmaAllocationRequest* pAllocationRequest);
5696 
5697  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5698 
5699  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5700 
5701  virtual void Alloc(
5702  const VmaAllocationRequest& request,
5703  VmaSuballocationType type,
5704  VkDeviceSize allocSize,
5705  VmaAllocation hAllocation);
5706 
5707  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5708  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5709 
5710 private:
5711  static const VkDeviceSize MIN_NODE_SIZE = 32;
5712  static const size_t MAX_LEVELS = 30;
5713 
5714  struct ValidationContext
5715  {
5716  size_t calculatedAllocationCount;
5717  size_t calculatedFreeCount;
5718  VkDeviceSize calculatedSumFreeSize;
5719 
5720  ValidationContext() :
5721  calculatedAllocationCount(0),
5722  calculatedFreeCount(0),
5723  calculatedSumFreeSize(0) { }
5724  };
5725 
5726  struct Node
5727  {
5728  VkDeviceSize offset;
5729  enum TYPE
5730  {
5731  TYPE_FREE,
5732  TYPE_ALLOCATION,
5733  TYPE_SPLIT,
5734  TYPE_COUNT
5735  } type;
5736  Node* parent;
5737  Node* buddy;
5738 
5739  union
5740  {
5741  struct
5742  {
5743  Node* prev;
5744  Node* next;
5745  } free;
5746  struct
5747  {
5748  VmaAllocation alloc;
5749  } allocation;
5750  struct
5751  {
5752  Node* leftChild;
5753  } split;
5754  };
5755  };
5756 
5757  // Size of the memory block aligned down to a power of two.
5758  VkDeviceSize m_UsableSize;
5759  uint32_t m_LevelCount;
5760 
5761  Node* m_Root;
5762  struct {
5763  Node* front;
5764  Node* back;
5765  } m_FreeList[MAX_LEVELS];
5766  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5767  size_t m_AllocationCount;
5768  // Number of nodes in the tree with type == TYPE_FREE.
5769  size_t m_FreeCount;
5770  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5771  VkDeviceSize m_SumFreeSize;
5772 
5773  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5774  void DeleteNode(Node* node);
5775  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5776  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5777  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5778  // Alloc passed just for validation. Can be null.
5779  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5780  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5781  // Adds node to the front of FreeList at given level.
5782  // node->type must be FREE.
5783  // node->free.prev, next can be undefined.
5784  void AddToFreeListFront(uint32_t level, Node* node);
5785  // Removes node from FreeList at given level.
5786  // node->type must be FREE.
5787  // node->free.prev, next stay untouched.
5788  void RemoveFromFreeList(uint32_t level, Node* node);
5789 
5790 #if VMA_STATS_STRING_ENABLED
5791  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5792 #endif
5793 };
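// [Illustrative sketch added by the editor; not part of the original file.]
// In the buddy metadata a node at level L has size m_UsableSize >> L (see
// LevelToNodeSize above), so the level chosen for an allocation is the deepest
// one whose node size still fits allocSize. A standalone sketch of that mapping
// (names are hypothetical):
#if 0
static uint32_t AllocSizeToLevelSketch(VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    VkDeviceSize nodeSize = usableSize;
    // Descend while a child node (half the size) would still hold the allocation.
    while(level + 1 < levelCount && (nodeSize >> 1) >= allocSize)
    {
        nodeSize >>= 1;
        ++level;
    }
    return level;
}
// e.g. usableSize = 256 MiB, levelCount = 30, allocSize = 5 MiB -> level 5 (8 MiB nodes).
#endif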
5794 
5795 /*
5796 Represents a single block of device memory (`VkDeviceMemory`) with all the
5797 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5798 
5799 Thread-safety: This class must be externally synchronized.
5800 */
5801 class VmaDeviceMemoryBlock
5802 {
5803  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5804 public:
5805  VmaBlockMetadata* m_pMetadata;
5806 
5807  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5808 
5809  ~VmaDeviceMemoryBlock()
5810  {
5811  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5812  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5813  }
5814 
5815  // Always call after construction.
5816  void Init(
5817  VmaAllocator hAllocator,
5818  VmaPool hParentPool,
5819  uint32_t newMemoryTypeIndex,
5820  VkDeviceMemory newMemory,
5821  VkDeviceSize newSize,
5822  uint32_t id,
5823  uint32_t algorithm);
5824  // Always call before destruction.
5825  void Destroy(VmaAllocator allocator);
5826 
5827  VmaPool GetParentPool() const { return m_hParentPool; }
5828  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5829  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5830  uint32_t GetId() const { return m_Id; }
5831  void* GetMappedData() const { return m_pMappedData; }
5832 
5833  // Validates all data structures inside this object. If not valid, returns false.
5834  bool Validate() const;
5835 
5836  VkResult CheckCorruption(VmaAllocator hAllocator);
5837 
5838  // ppData can be null.
5839  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5840  void Unmap(VmaAllocator hAllocator, uint32_t count);
5841 
5842  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5843  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5844 
5845  VkResult BindBufferMemory(
5846  const VmaAllocator hAllocator,
5847  const VmaAllocation hAllocation,
5848  VkBuffer hBuffer);
5849  VkResult BindImageMemory(
5850  const VmaAllocator hAllocator,
5851  const VmaAllocation hAllocation,
5852  VkImage hImage);
5853 
5854 private:
5855  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
5856  uint32_t m_MemoryTypeIndex;
5857  uint32_t m_Id;
5858  VkDeviceMemory m_hMemory;
5859 
5860  /*
5861  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5862  Also protects m_MapCount, m_pMappedData.
5863  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5864  */
5865  VMA_MUTEX m_Mutex;
5866  uint32_t m_MapCount;
5867  void* m_pMappedData;
5868 };
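// [Illustrative sketch added by the editor; not part of the original file.]
// Map()/Unmap() are reference counted per block: only the 0 -> 1 transition maps
// the memory and only the 1 -> 0 transition unmaps it, so multiple allocations in
// one block can be mapped concurrently. A hedged usage sketch:
#if 0
static void MapTwiceSketch(VmaAllocator hAllocator, VmaDeviceMemoryBlock* pBlock)
{
    void* pData = VMA_NULL;
    pBlock->Map(hAllocator, 1, &pData); // m_MapCount 0 -> 1: vkMapMemory happens here.
    pBlock->Map(hAllocator, 1, &pData); // m_MapCount 1 -> 2: reuses m_pMappedData.
    pBlock->Unmap(hAllocator, 1);       // 2 -> 1: memory stays mapped.
    pBlock->Unmap(hAllocator, 1);       // 1 -> 0: vkUnmapMemory happens here.
}
#endif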
5869 
5870 struct VmaPointerLess
5871 {
5872  bool operator()(const void* lhs, const void* rhs) const
5873  {
5874  return lhs < rhs;
5875  }
5876 };
5877 
5878 struct VmaDefragmentationMove
5879 {
5880  size_t srcBlockIndex;
5881  size_t dstBlockIndex;
5882  VkDeviceSize srcOffset;
5883  VkDeviceSize dstOffset;
5884  VkDeviceSize size;
5885 };
5886 
5887 class VmaDefragmentationAlgorithm;
5888 
5889 /*
5890 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5891 Vulkan memory type.
5892 
5893 Synchronized internally with a mutex.
5894 */
5895 struct VmaBlockVector
5896 {
5897  VMA_CLASS_NO_COPY(VmaBlockVector)
5898 public:
5899  VmaBlockVector(
5900  VmaAllocator hAllocator,
5901  VmaPool hParentPool,
5902  uint32_t memoryTypeIndex,
5903  VkDeviceSize preferredBlockSize,
5904  size_t minBlockCount,
5905  size_t maxBlockCount,
5906  VkDeviceSize bufferImageGranularity,
5907  uint32_t frameInUseCount,
5908  bool isCustomPool,
5909  bool explicitBlockSize,
5910  uint32_t algorithm);
5911  ~VmaBlockVector();
5912 
5913  VkResult CreateMinBlocks();
5914 
5915  VmaPool GetParentPool() const { return m_hParentPool; }
5916  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5917  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5918  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5919  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5920  uint32_t GetAlgorithm() const { return m_Algorithm; }
5921 
5922  void GetPoolStats(VmaPoolStats* pStats);
5923 
5924  bool IsEmpty() const { return m_Blocks.empty(); }
5925  bool IsCorruptionDetectionEnabled() const;
5926 
5927  VkResult Allocate(
5928  uint32_t currentFrameIndex,
5929  VkDeviceSize size,
5930  VkDeviceSize alignment,
5931  const VmaAllocationCreateInfo& createInfo,
5932  VmaSuballocationType suballocType,
5933  size_t allocationCount,
5934  VmaAllocation* pAllocations);
5935 
5936  void Free(
5937  VmaAllocation hAllocation);
5938 
5939  // Adds statistics of this BlockVector to pStats.
5940  void AddStats(VmaStats* pStats);
5941 
5942 #if VMA_STATS_STRING_ENABLED
5943  void PrintDetailedMap(class VmaJsonWriter& json);
5944 #endif
5945 
5946  void MakePoolAllocationsLost(
5947  uint32_t currentFrameIndex,
5948  size_t* pLostAllocationCount);
5949  VkResult CheckCorruption();
5950 
5951  // Saves results in pCtx->res.
5952  void Defragment(
5953  class VmaBlockVectorDefragmentationContext* pCtx,
5954  VmaDefragmentationStats* pStats,
5955  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5956  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5957  VkCommandBuffer commandBuffer);
5958  void DefragmentationEnd(
5959  class VmaBlockVectorDefragmentationContext* pCtx,
5960  VmaDefragmentationStats* pStats);
5961 
5962  ////////////////////////////////////////////////////////////////////////////////
5963  // To be used only while the m_Mutex is locked. Used during defragmentation.
5964 
5965  size_t GetBlockCount() const { return m_Blocks.size(); }
5966  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5967  size_t CalcAllocationCount() const;
5968  bool IsBufferImageGranularityConflictPossible() const;
5969 
5970 private:
5971  friend class VmaDefragmentationAlgorithm_Generic;
5972 
5973  const VmaAllocator m_hAllocator;
5974  const VmaPool m_hParentPool;
5975  const uint32_t m_MemoryTypeIndex;
5976  const VkDeviceSize m_PreferredBlockSize;
5977  const size_t m_MinBlockCount;
5978  const size_t m_MaxBlockCount;
5979  const VkDeviceSize m_BufferImageGranularity;
5980  const uint32_t m_FrameInUseCount;
5981  const bool m_IsCustomPool;
5982  const bool m_ExplicitBlockSize;
5983  const uint32_t m_Algorithm;
5984  /* There can be at most one memory block that is completely empty - a
5985  hysteresis to avoid the pessimistic case of alternating creation and
5986  destruction of a VkDeviceMemory. */
5987  bool m_HasEmptyBlock;
5988  VMA_RW_MUTEX m_Mutex;
5989  // Incrementally sorted by sumFreeSize, ascending.
5990  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5991  uint32_t m_NextBlockId;
5992 
5993  VkDeviceSize CalcMaxBlockSize() const;
5994 
5995  // Finds and removes given block from vector.
5996  void Remove(VmaDeviceMemoryBlock* pBlock);
5997 
5998  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5999  // after this call.
6000  void IncrementallySortBlocks();
6001 
6002  VkResult AllocatePage(
6003  uint32_t currentFrameIndex,
6004  VkDeviceSize size,
6005  VkDeviceSize alignment,
6006  const VmaAllocationCreateInfo& createInfo,
6007  VmaSuballocationType suballocType,
6008  VmaAllocation* pAllocation);
6009 
6010  // To be used only without CAN_MAKE_OTHER_LOST flag.
6011  VkResult AllocateFromBlock(
6012  VmaDeviceMemoryBlock* pBlock,
6013  uint32_t currentFrameIndex,
6014  VkDeviceSize size,
6015  VkDeviceSize alignment,
6016  VmaAllocationCreateFlags allocFlags,
6017  void* pUserData,
6018  VmaSuballocationType suballocType,
6019  uint32_t strategy,
6020  VmaAllocation* pAllocation);
6021 
6022  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6023 
6024  // Saves result to pCtx->res.
6025  void ApplyDefragmentationMovesCpu(
6026  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6027  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6028  // Saves result to pCtx->res.
6029  void ApplyDefragmentationMovesGpu(
6030  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6031  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6032  VkCommandBuffer commandBuffer);
6033 
6034  /*
6035  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6036  - updated with new data.
6037  */
6038  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6039 };
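// [Illustrative sketch added by the editor; not part of the original file.]
// IncrementallySortBlocks() amortizes sorting cost: each call performs only a small
// step, so m_Blocks converges toward being sorted by sumFreeSize over many calls
// rather than being fully sorted at once. One possible single step, sketched under
// that assumption (assuming <utility>/<vector>):
#if 0
#include <utility>
#include <vector>
static void IncrementalSortStepSketch(std::vector<int>& v)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(v[i - 1] > v[i])
        {
            std::swap(v[i - 1], v[i]);
            return; // At most one swap per call; full order emerges over time.
        }
    }
}
#endif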
6040 
6041 struct VmaPool_T
6042 {
6043  VMA_CLASS_NO_COPY(VmaPool_T)
6044 public:
6045  VmaBlockVector m_BlockVector;
6046 
6047  VmaPool_T(
6048  VmaAllocator hAllocator,
6049  const VmaPoolCreateInfo& createInfo,
6050  VkDeviceSize preferredBlockSize);
6051  ~VmaPool_T();
6052 
6053  uint32_t GetId() const { return m_Id; }
6054  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6055 
6056 #if VMA_STATS_STRING_ENABLED
6057  //void PrintDetailedMap(class VmaStringBuilder& sb);
6058 #endif
6059 
6060 private:
6061  uint32_t m_Id;
6062 };
6063 
6064 /*
6065 Performs defragmentation:
6066 
6067 - Updates `pBlockVector->m_pMetadata`.
6068 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6069 - Does not move actual data, only returns requested moves as `moves`.
6070 */
6071 class VmaDefragmentationAlgorithm
6072 {
6073  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6074 public:
6075  VmaDefragmentationAlgorithm(
6076  VmaAllocator hAllocator,
6077  VmaBlockVector* pBlockVector,
6078  uint32_t currentFrameIndex) :
6079  m_hAllocator(hAllocator),
6080  m_pBlockVector(pBlockVector),
6081  m_CurrentFrameIndex(currentFrameIndex)
6082  {
6083  }
6084  virtual ~VmaDefragmentationAlgorithm()
6085  {
6086  }
6087 
6088  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6089  virtual void AddAll() = 0;
6090 
6091  virtual VkResult Defragment(
6092  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6093  VkDeviceSize maxBytesToMove,
6094  uint32_t maxAllocationsToMove) = 0;
6095 
6096  virtual VkDeviceSize GetBytesMoved() const = 0;
6097  virtual uint32_t GetAllocationsMoved() const = 0;
6098 
6099 protected:
6100  VmaAllocator const m_hAllocator;
6101  VmaBlockVector* const m_pBlockVector;
6102  const uint32_t m_CurrentFrameIndex;
6103 
6104  struct AllocationInfo
6105  {
6106  VmaAllocation m_hAllocation;
6107  VkBool32* m_pChanged;
6108 
6109  AllocationInfo() :
6110  m_hAllocation(VK_NULL_HANDLE),
6111  m_pChanged(VMA_NULL)
6112  {
6113  }
6114  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6115  m_hAllocation(hAlloc),
6116  m_pChanged(pChanged)
6117  {
6118  }
6119  };
6120 };
6121 
6122 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6123 {
6124  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6125 public:
6126  VmaDefragmentationAlgorithm_Generic(
6127  VmaAllocator hAllocator,
6128  VmaBlockVector* pBlockVector,
6129  uint32_t currentFrameIndex,
6130  bool overlappingMoveSupported);
6131  virtual ~VmaDefragmentationAlgorithm_Generic();
6132 
6133  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6134  virtual void AddAll() { m_AllAllocations = true; }
6135 
6136  virtual VkResult Defragment(
6137  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6138  VkDeviceSize maxBytesToMove,
6139  uint32_t maxAllocationsToMove);
6140 
6141  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6142  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6143 
6144 private:
6145  uint32_t m_AllocationCount;
6146  bool m_AllAllocations;
6147 
6148  VkDeviceSize m_BytesMoved;
6149  uint32_t m_AllocationsMoved;
6150 
6151  struct AllocationInfoSizeGreater
6152  {
6153  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6154  {
6155  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6156  }
6157  };
6158 
6159  struct AllocationInfoOffsetGreater
6160  {
6161  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6162  {
6163  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6164  }
6165  };
6166 
6167  struct BlockInfo
6168  {
6169  size_t m_OriginalBlockIndex;
6170  VmaDeviceMemoryBlock* m_pBlock;
6171  bool m_HasNonMovableAllocations;
6172  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6173 
6174  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6175  m_OriginalBlockIndex(SIZE_MAX),
6176  m_pBlock(VMA_NULL),
6177  m_HasNonMovableAllocations(true),
6178  m_Allocations(pAllocationCallbacks)
6179  {
6180  }
6181 
6182  void CalcHasNonMovableAllocations()
6183  {
6184  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6185  const size_t defragmentAllocCount = m_Allocations.size();
6186  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6187  }
6188 
6189  void SortAllocationsBySizeDescending()
6190  {
6191  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6192  }
6193 
6194  void SortAllocationsByOffsetDescending()
6195  {
6196  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6197  }
6198  };
6199 
6200  struct BlockPointerLess
6201  {
6202  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6203  {
6204  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6205  }
6206  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6207  {
6208  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6209  }
6210  };
6211 
6212  // 1. Blocks with some non-movable allocations go first.
6213  // 2. Blocks with smaller sumFreeSize go first.
6214  struct BlockInfoCompareMoveDestination
6215  {
6216  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6217  {
6218  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6219  {
6220  return true;
6221  }
6222  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6223  {
6224  return false;
6225  }
6226  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6227  {
6228  return true;
6229  }
6230  return false;
6231  }
6232  };
6233 
6234  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6235  BlockInfoVector m_Blocks;
6236 
6237  VkResult DefragmentRound(
6238  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6239  VkDeviceSize maxBytesToMove,
6240  uint32_t maxAllocationsToMove);
6241 
6242  size_t CalcBlocksWithNonMovableCount() const;
6243 
6244  static bool MoveMakesSense(
6245  size_t dstBlockIndex, VkDeviceSize dstOffset,
6246  size_t srcBlockIndex, VkDeviceSize srcOffset);
6247 };
6248 
6249 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6250 {
6251  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6252 public:
6253  VmaDefragmentationAlgorithm_Fast(
6254  VmaAllocator hAllocator,
6255  VmaBlockVector* pBlockVector,
6256  uint32_t currentFrameIndex,
6257  bool overlappingMoveSupported);
6258  virtual ~VmaDefragmentationAlgorithm_Fast();
6259 
6260  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6261  virtual void AddAll() { m_AllAllocations = true; }
6262 
6263  virtual VkResult Defragment(
6264  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6265  VkDeviceSize maxBytesToMove,
6266  uint32_t maxAllocationsToMove);
6267 
6268  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6269  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6270 
6271 private:
6272  struct BlockInfo
6273  {
6274  size_t origBlockIndex;
6275  };
6276 
6277  class FreeSpaceDatabase
6278  {
6279  public:
6280  FreeSpaceDatabase()
6281  {
6282  FreeSpace s = {};
6283  s.blockInfoIndex = SIZE_MAX;
6284  for(size_t i = 0; i < MAX_COUNT; ++i)
6285  {
6286  m_FreeSpaces[i] = s;
6287  }
6288  }
6289 
6290  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6291  {
6292  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6293  {
6294  return;
6295  }
6296 
6297  // Find the first empty slot, or failing that, the smallest entry smaller than the new range.
6298  size_t bestIndex = SIZE_MAX;
6299  for(size_t i = 0; i < MAX_COUNT; ++i)
6300  {
6301  // Empty structure.
6302  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6303  {
6304  bestIndex = i;
6305  break;
6306  }
6307  if(m_FreeSpaces[i].size < size &&
6308  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6309  {
6310  bestIndex = i;
6311  }
6312  }
6313 
6314  if(bestIndex != SIZE_MAX)
6315  {
6316  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6317  m_FreeSpaces[bestIndex].offset = offset;
6318  m_FreeSpaces[bestIndex].size = size;
6319  }
6320  }
6321 
6322  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6323  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6324  {
6325  size_t bestIndex = SIZE_MAX;
6326  VkDeviceSize bestFreeSpaceAfter = 0;
6327  for(size_t i = 0; i < MAX_COUNT; ++i)
6328  {
6329  // Structure is valid.
6330  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6331  {
6332  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6333  // Allocation fits into this structure.
6334  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6335  {
6336  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6337  (dstOffset + size);
6338  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6339  {
6340  bestIndex = i;
6341  bestFreeSpaceAfter = freeSpaceAfter;
6342  }
6343  }
6344  }
6345  }
6346 
6347  if(bestIndex != SIZE_MAX)
6348  {
6349  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6350  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6351 
6352  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6353  {
6354  // Leave this structure for remaining empty space.
6355  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6356  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6357  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6358  }
6359  else
6360  {
6361  // This structure becomes invalid.
6362  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6363  }
6364 
6365  return true;
6366  }
6367 
6368  return false;
6369  }
6370 
6371  private:
6372  static const size_t MAX_COUNT = 4;
6373 
6374  struct FreeSpace
6375  {
6376  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6377  VkDeviceSize offset;
6378  VkDeviceSize size;
6379  } m_FreeSpaces[MAX_COUNT];
6380  };
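  // [Illustrative note added by the editor; not part of the original file.]
  // FreeSpaceDatabase tracks only MAX_COUNT (4) candidate free ranges. When full,
  // Register() overwrites the smallest tracked range that is smaller than the new
  // one; Fetch() picks the candidate leaving the most space after the aligned
  // allocation, then shrinks the chosen entry, or invalidates it (blockInfoIndex =
  // SIZE_MAX) if the leftover falls below the registration threshold.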
6381 
6382  const bool m_OverlappingMoveSupported;
6383 
6384  uint32_t m_AllocationCount;
6385  bool m_AllAllocations;
6386 
6387  VkDeviceSize m_BytesMoved;
6388  uint32_t m_AllocationsMoved;
6389 
6390  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6391 
6392  void PreprocessMetadata();
6393  void PostprocessMetadata();
6394  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6395 };
6396 
6397 struct VmaBlockDefragmentationContext
6398 {
6399  enum BLOCK_FLAG
6400  {
6401  BLOCK_FLAG_USED = 0x00000001,
6402  };
6403  uint32_t flags;
6404  VkBuffer hBuffer;
6405 };
6406 
6407 class VmaBlockVectorDefragmentationContext
6408 {
6409  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6410 public:
6411  VkResult res;
6412  bool mutexLocked;
6413  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6414 
6415  VmaBlockVectorDefragmentationContext(
6416  VmaAllocator hAllocator,
6417  VmaPool hCustomPool, // Optional.
6418  VmaBlockVector* pBlockVector,
6419  uint32_t currFrameIndex);
6420  ~VmaBlockVectorDefragmentationContext();
6421 
6422  VmaPool GetCustomPool() const { return m_hCustomPool; }
6423  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6424  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6425 
6426  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6427  void AddAll() { m_AllAllocations = true; }
6428 
6429  void Begin(bool overlappingMoveSupported);
6430 
6431 private:
6432  const VmaAllocator m_hAllocator;
6433  // Null if not from custom pool.
6434  const VmaPool m_hCustomPool;
6435  // Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6436  VmaBlockVector* const m_pBlockVector;
6437  const uint32_t m_CurrFrameIndex;
6438  // Owner of this object.
6439  VmaDefragmentationAlgorithm* m_pAlgorithm;
6440 
6441  struct AllocInfo
6442  {
6443  VmaAllocation hAlloc;
6444  VkBool32* pChanged;
6445  };
6446  // Used between constructor and Begin.
6447  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6448  bool m_AllAllocations;
6449 };
6450 
6451 struct VmaDefragmentationContext_T
6452 {
6453 private:
6454  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6455 public:
6456  VmaDefragmentationContext_T(
6457  VmaAllocator hAllocator,
6458  uint32_t currFrameIndex,
6459  uint32_t flags,
6460  VmaDefragmentationStats* pStats);
6461  ~VmaDefragmentationContext_T();
6462 
6463  void AddPools(uint32_t poolCount, VmaPool* pPools);
6464  void AddAllocations(
6465  uint32_t allocationCount,
6466  VmaAllocation* pAllocations,
6467  VkBool32* pAllocationsChanged);
6468 
6469  /*
6470  Returns:
6471  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6472  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6473  - Negative value if error occurred and object can be destroyed immediately.
6474  */
6475  VkResult Defragment(
6476  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6477  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6478  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6479 
6480 private:
6481  const VmaAllocator m_hAllocator;
6482  const uint32_t m_CurrFrameIndex;
6483  const uint32_t m_Flags;
6484  VmaDefragmentationStats* const m_pStats;
6485  // Owner of these objects.
6486  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6487  // Owner of these objects.
6488  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6489 };
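// Usage sketch for the Defragment() contract above, via the public API declared
// earlier in this header (`allocator` and `defragInfo` are hypothetical variables,
// with defragInfo.commandBuffer set if GPU defragmentation is wanted):
//
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//   if(res == VK_NOT_READY)
//   {
//       // GPU moves were recorded into defragInfo.commandBuffer:
//       // submit it and wait for completion before finishing.
//   }
//   vmaDefragmentationEnd(allocator, defragCtx);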
6490 
6491 #if VMA_RECORDING_ENABLED
6492 
6493 class VmaRecorder
6494 {
6495 public:
6496  VmaRecorder();
6497  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6498  void WriteConfiguration(
6499  const VkPhysicalDeviceProperties& devProps,
6500  const VkPhysicalDeviceMemoryProperties& memProps,
6501  bool dedicatedAllocationExtensionEnabled);
6502  ~VmaRecorder();
6503 
6504  void RecordCreateAllocator(uint32_t frameIndex);
6505  void RecordDestroyAllocator(uint32_t frameIndex);
6506  void RecordCreatePool(uint32_t frameIndex,
6507  const VmaPoolCreateInfo& createInfo,
6508  VmaPool pool);
6509  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6510  void RecordAllocateMemory(uint32_t frameIndex,
6511  const VkMemoryRequirements& vkMemReq,
6512  const VmaAllocationCreateInfo& createInfo,
6513  VmaAllocation allocation);
6514  void RecordAllocateMemoryPages(uint32_t frameIndex,
6515  const VkMemoryRequirements& vkMemReq,
6516  const VmaAllocationCreateInfo& createInfo,
6517  uint64_t allocationCount,
6518  const VmaAllocation* pAllocations);
6519  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6520  const VkMemoryRequirements& vkMemReq,
6521  bool requiresDedicatedAllocation,
6522  bool prefersDedicatedAllocation,
6523  const VmaAllocationCreateInfo& createInfo,
6524  VmaAllocation allocation);
6525  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6526  const VkMemoryRequirements& vkMemReq,
6527  bool requiresDedicatedAllocation,
6528  bool prefersDedicatedAllocation,
6529  const VmaAllocationCreateInfo& createInfo,
6530  VmaAllocation allocation);
6531  void RecordFreeMemory(uint32_t frameIndex,
6532  VmaAllocation allocation);
6533  void RecordFreeMemoryPages(uint32_t frameIndex,
6534  uint64_t allocationCount,
6535  const VmaAllocation* pAllocations);
6536  void RecordResizeAllocation(
6537  uint32_t frameIndex,
6538  VmaAllocation allocation,
6539  VkDeviceSize newSize);
6540  void RecordSetAllocationUserData(uint32_t frameIndex,
6541  VmaAllocation allocation,
6542  const void* pUserData);
6543  void RecordCreateLostAllocation(uint32_t frameIndex,
6544  VmaAllocation allocation);
6545  void RecordMapMemory(uint32_t frameIndex,
6546  VmaAllocation allocation);
6547  void RecordUnmapMemory(uint32_t frameIndex,
6548  VmaAllocation allocation);
6549  void RecordFlushAllocation(uint32_t frameIndex,
6550  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6551  void RecordInvalidateAllocation(uint32_t frameIndex,
6552  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6553  void RecordCreateBuffer(uint32_t frameIndex,
6554  const VkBufferCreateInfo& bufCreateInfo,
6555  const VmaAllocationCreateInfo& allocCreateInfo,
6556  VmaAllocation allocation);
6557  void RecordCreateImage(uint32_t frameIndex,
6558  const VkImageCreateInfo& imageCreateInfo,
6559  const VmaAllocationCreateInfo& allocCreateInfo,
6560  VmaAllocation allocation);
6561  void RecordDestroyBuffer(uint32_t frameIndex,
6562  VmaAllocation allocation);
6563  void RecordDestroyImage(uint32_t frameIndex,
6564  VmaAllocation allocation);
6565  void RecordTouchAllocation(uint32_t frameIndex,
6566  VmaAllocation allocation);
6567  void RecordGetAllocationInfo(uint32_t frameIndex,
6568  VmaAllocation allocation);
6569  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6570  VmaPool pool);
6571  void RecordDefragmentationBegin(uint32_t frameIndex,
6572  const VmaDefragmentationInfo2& info,
6573  VmaDefragmentationContext ctx);
6574  void RecordDefragmentationEnd(uint32_t frameIndex,
6575  VmaDefragmentationContext ctx);
6576 
6577 private:
6578  struct CallParams
6579  {
6580  uint32_t threadId;
6581  double time;
6582  };
6583 
6584  class UserDataString
6585  {
6586  public:
6587  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6588  const char* GetString() const { return m_Str; }
6589 
6590  private:
6591  char m_PtrStr[17];
6592  const char* m_Str;
6593  };
6594 
6595  bool m_UseMutex;
6596  VmaRecordFlags m_Flags;
6597  FILE* m_File;
6598  VMA_MUTEX m_FileMutex;
6599  int64_t m_Freq;
6600  int64_t m_StartCounter;
6601 
6602  void GetBasicParams(CallParams& outParams);
6603 
6604  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6605  template<typename T>
6606  void PrintPointerList(uint64_t count, const T* pItems)
6607  {
6608  if(count)
6609  {
6610  fprintf(m_File, "%p", pItems[0]);
6611  for(uint64_t i = 1; i < count; ++i)
6612  {
6613  fprintf(m_File, " %p", pItems[i]);
6614  }
6615  }
6616  }
6617 
6618  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6619  void Flush();
6620 };
6621 
6622 #endif // #if VMA_RECORDING_ENABLED
6623 
6624 /*
6625 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6626 */
6627 class VmaAllocationObjectAllocator
6628 {
6629  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6630 public:
6631  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6632 
6633  VmaAllocation Allocate();
6634  void Free(VmaAllocation hAlloc);
6635 
6636 private:
6637  VMA_MUTEX m_Mutex;
6638  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6639 };
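// A minimal sketch of the locking pattern this class encapsulates, assuming the
// implementation simply guards the pool allocator with the mutex (VmaMutexLock is
// the RAII lock helper defined earlier in this file):
//
//   VmaAllocation VmaAllocationObjectAllocator::Allocate()
//   {
//       VmaMutexLock mutexLock(m_Mutex);  // serialize access to the free list
//       return m_Allocator.Alloc();       // pop/construct a VmaAllocation_T
//   }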
6640 
6641 // Main allocator object.
6642 struct VmaAllocator_T
6643 {
6644  VMA_CLASS_NO_COPY(VmaAllocator_T)
6645 public:
6646  bool m_UseMutex;
6647  bool m_UseKhrDedicatedAllocation;
6648  VkDevice m_hDevice;
6649  bool m_AllocationCallbacksSpecified;
6650  VkAllocationCallbacks m_AllocationCallbacks;
6651  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6652  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6653 
6654  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6655  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6656  VMA_MUTEX m_HeapSizeLimitMutex;
6657 
6658  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6659  VkPhysicalDeviceMemoryProperties m_MemProps;
6660 
6661  // Default pools.
6662  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6663 
6664  // Each vector is sorted by memory (handle value).
6665  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6666  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6667  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6668 
6669  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6670  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6671  ~VmaAllocator_T();
6672 
6673  const VkAllocationCallbacks* GetAllocationCallbacks() const
6674  {
6675  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6676  }
6677  const VmaVulkanFunctions& GetVulkanFunctions() const
6678  {
6679  return m_VulkanFunctions;
6680  }
6681 
6682  VkDeviceSize GetBufferImageGranularity() const
6683  {
6684  return VMA_MAX(
6685  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6686  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6687  }
6688 
6689  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6690  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6691 
6692  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6693  {
6694  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6695  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6696  }
6697  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6698  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6699  {
6700  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6701  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6702  }
6703  // Minimum alignment for all allocations in specific memory type.
6704  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6705  {
6706  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6707  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6708  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6709  }
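// Example of why the alignment above matters (a sketch with a hypothetical
// nonCoherentAtomSize of 64): allocations in a HOST_VISIBLE, non-HOST_COHERENT
// memory type are then placed on 64-byte boundaries, so a vkFlushMappedMemoryRanges /
// vkInvalidateMappedMemoryRanges range rounded out to the atom size never overlaps
// a neighboring allocation.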
6710 
6711  bool IsIntegratedGpu() const
6712  {
6713  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6714  }
6715 
6716 #if VMA_RECORDING_ENABLED
6717  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6718 #endif
6719 
6720  void GetBufferMemoryRequirements(
6721  VkBuffer hBuffer,
6722  VkMemoryRequirements& memReq,
6723  bool& requiresDedicatedAllocation,
6724  bool& prefersDedicatedAllocation) const;
6725  void GetImageMemoryRequirements(
6726  VkImage hImage,
6727  VkMemoryRequirements& memReq,
6728  bool& requiresDedicatedAllocation,
6729  bool& prefersDedicatedAllocation) const;
6730 
6731  // Main allocation function.
6732  VkResult AllocateMemory(
6733  const VkMemoryRequirements& vkMemReq,
6734  bool requiresDedicatedAllocation,
6735  bool prefersDedicatedAllocation,
6736  VkBuffer dedicatedBuffer,
6737  VkImage dedicatedImage,
6738  const VmaAllocationCreateInfo& createInfo,
6739  VmaSuballocationType suballocType,
6740  size_t allocationCount,
6741  VmaAllocation* pAllocations);
6742 
6743  // Main deallocation function.
6744  void FreeMemory(
6745  size_t allocationCount,
6746  const VmaAllocation* pAllocations);
6747 
6748  VkResult ResizeAllocation(
6749  const VmaAllocation alloc,
6750  VkDeviceSize newSize);
6751 
6752  void CalculateStats(VmaStats* pStats);
6753 
6754 #if VMA_STATS_STRING_ENABLED
6755  void PrintDetailedMap(class VmaJsonWriter& json);
6756 #endif
6757 
6758  VkResult DefragmentationBegin(
6759  const VmaDefragmentationInfo2& info,
6760  VmaDefragmentationStats* pStats,
6761  VmaDefragmentationContext* pContext);
6762  VkResult DefragmentationEnd(
6763  VmaDefragmentationContext context);
6764 
6765  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6766  bool TouchAllocation(VmaAllocation hAllocation);
6767 
6768  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6769  void DestroyPool(VmaPool pool);
6770  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6771 
6772  void SetCurrentFrameIndex(uint32_t frameIndex);
6773  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6774 
6775  void MakePoolAllocationsLost(
6776  VmaPool hPool,
6777  size_t* pLostAllocationCount);
6778  VkResult CheckPoolCorruption(VmaPool hPool);
6779  VkResult CheckCorruption(uint32_t memoryTypeBits);
6780 
6781  void CreateLostAllocation(VmaAllocation* pAllocation);
6782 
6783  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6784  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6785 
6786  VkResult Map(VmaAllocation hAllocation, void** ppData);
6787  void Unmap(VmaAllocation hAllocation);
6788 
6789  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6790  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6791 
6792  void FlushOrInvalidateAllocation(
6793  VmaAllocation hAllocation,
6794  VkDeviceSize offset, VkDeviceSize size,
6795  VMA_CACHE_OPERATION op);
6796 
6797  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6798 
6799  /*
6800  Returns a bit mask of memory types that can support defragmentation on GPU,
6801  because they allow creation of the buffer required for copy operations.
6802  */
6803  uint32_t GetGpuDefragmentationMemoryTypeBits();
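  // Usage sketch for the mask above (`memTypeIndex` is a hypothetical variable):
  //   if((GetGpuDefragmentationMemoryTypeBits() & (1u << memTypeIndex)) != 0)
  //   {
  //       // memTypeIndex can be defragmented on GPU.
  //   }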
6804 
6805 private:
6806  VkDeviceSize m_PreferredLargeHeapBlockSize;
6807 
6808  VkPhysicalDevice m_PhysicalDevice;
6809  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6810  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
6811 
6812  VMA_RW_MUTEX m_PoolsMutex;
6813  // Protected by m_PoolsMutex. Sorted by pointer value.
6814  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6815  uint32_t m_NextPoolId;
6816 
6817  VmaVulkanFunctions m_VulkanFunctions;
6818 
6819 #if VMA_RECORDING_ENABLED
6820  VmaRecorder* m_pRecorder;
6821 #endif
6822 
6823  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6824 
6825  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6826 
6827  VkResult AllocateMemoryOfType(
6828  VkDeviceSize size,
6829  VkDeviceSize alignment,
6830  bool dedicatedAllocation,
6831  VkBuffer dedicatedBuffer,
6832  VkImage dedicatedImage,
6833  const VmaAllocationCreateInfo& createInfo,
6834  uint32_t memTypeIndex,
6835  VmaSuballocationType suballocType,
6836  size_t allocationCount,
6837  VmaAllocation* pAllocations);
6838 
6839  // Helper function only to be used inside AllocateDedicatedMemory.
6840  VkResult AllocateDedicatedMemoryPage(
6841  VkDeviceSize size,
6842  VmaSuballocationType suballocType,
6843  uint32_t memTypeIndex,
6844  const VkMemoryAllocateInfo& allocInfo,
6845  bool map,
6846  bool isUserDataString,
6847  void* pUserData,
6848  VmaAllocation* pAllocation);
6849 
6850  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6851  VkResult AllocateDedicatedMemory(
6852  VkDeviceSize size,
6853  VmaSuballocationType suballocType,
6854  uint32_t memTypeIndex,
6855  bool map,
6856  bool isUserDataString,
6857  void* pUserData,
6858  VkBuffer dedicatedBuffer,
6859  VkImage dedicatedImage,
6860  size_t allocationCount,
6861  VmaAllocation* pAllocations);
6862 
6863  void FreeDedicatedMemory(VmaAllocation allocation);
6864 
6865  /*
6866  Calculates and returns a bit mask of memory types that can support defragmentation
6867  on GPU, because they allow creation of the buffer required for copy operations.
6868  */
6869  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
6870 };
6871 
6872 ////////////////////////////////////////////////////////////////////////////////
6873 // Memory allocation #2 after VmaAllocator_T definition
6874 
6875 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6876 {
6877  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6878 }
6879 
6880 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6881 {
6882  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6883 }
6884 
6885 template<typename T>
6886 static T* VmaAllocate(VmaAllocator hAllocator)
6887 {
6888  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6889 }
6890 
6891 template<typename T>
6892 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6893 {
6894  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6895 }
6896 
6897 template<typename T>
6898 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6899 {
6900  if(ptr != VMA_NULL)
6901  {
6902  ptr->~T();
6903  VmaFree(hAllocator, ptr);
6904  }
6905 }
6906 
6907 template<typename T>
6908 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6909 {
6910  if(ptr != VMA_NULL)
6911  {
6912  for(size_t i = count; i--; )
6913  ptr[i].~T();
6914  VmaFree(hAllocator, ptr);
6915  }
6916 }
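// Usage sketch: these helpers pair with the vma_new_array macro defined earlier in
// this file, e.g. the user-data string copy in VmaAllocation_T::SetUserData /
// FreeUserDataString below:
//
//   char* const str = vma_new_array(hAllocator, char, strLen + 1);  // allocate + construct
//   ...
//   vma_delete_array(hAllocator, str, strLen + 1);                  // destroy + free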
6917 
6918 ////////////////////////////////////////////////////////////////////////////////
6919 // VmaStringBuilder
6920 
6921 #if VMA_STATS_STRING_ENABLED
6922 
6923 class VmaStringBuilder
6924 {
6925 public:
6926  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6927  size_t GetLength() const { return m_Data.size(); }
6928  const char* GetData() const { return m_Data.data(); }
6929 
6930  void Add(char ch) { m_Data.push_back(ch); }
6931  void Add(const char* pStr);
6932  void AddNewLine() { Add('\n'); }
6933  void AddNumber(uint32_t num);
6934  void AddNumber(uint64_t num);
6935  void AddPointer(const void* ptr);
6936 
6937 private:
6938  VmaVector< char, VmaStlAllocator<char> > m_Data;
6939 };
6940 
6941 void VmaStringBuilder::Add(const char* pStr)
6942 {
6943  const size_t strLen = strlen(pStr);
6944  if(strLen > 0)
6945  {
6946  const size_t oldCount = m_Data.size();
6947  m_Data.resize(oldCount + strLen);
6948  memcpy(m_Data.data() + oldCount, pStr, strLen);
6949  }
6950 }
6951 
6952 void VmaStringBuilder::AddNumber(uint32_t num)
6953 {
6954  char buf[11];
6955  VmaUint32ToStr(buf, sizeof(buf), num);
6956  Add(buf);
6957 }
6958 
6959 void VmaStringBuilder::AddNumber(uint64_t num)
6960 {
6961  char buf[21];
6962  VmaUint64ToStr(buf, sizeof(buf), num);
6963  Add(buf);
6964 }
6965 
6966 void VmaStringBuilder::AddPointer(const void* ptr)
6967 {
6968  char buf[21];
6969  VmaPtrToStr(buf, sizeof(buf), ptr);
6970  Add(buf);
6971 }
6972 
6973 #endif // #if VMA_STATS_STRING_ENABLED
6974 
6975 ////////////////////////////////////////////////////////////////////////////////
6976 // VmaJsonWriter
6977 
6978 #if VMA_STATS_STRING_ENABLED
6979 
6980 class VmaJsonWriter
6981 {
6982  VMA_CLASS_NO_COPY(VmaJsonWriter)
6983 public:
6984  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6985  ~VmaJsonWriter();
6986 
6987  void BeginObject(bool singleLine = false);
6988  void EndObject();
6989 
6990  void BeginArray(bool singleLine = false);
6991  void EndArray();
6992 
6993  void WriteString(const char* pStr);
6994  void BeginString(const char* pStr = VMA_NULL);
6995  void ContinueString(const char* pStr);
6996  void ContinueString(uint32_t n);
6997  void ContinueString(uint64_t n);
6998  void ContinueString_Pointer(const void* ptr);
6999  void EndString(const char* pStr = VMA_NULL);
7000 
7001  void WriteNumber(uint32_t n);
7002  void WriteNumber(uint64_t n);
7003  void WriteBool(bool b);
7004  void WriteNull();
7005 
7006 private:
7007  static const char* const INDENT;
7008 
7009  enum COLLECTION_TYPE
7010  {
7011  COLLECTION_TYPE_OBJECT,
7012  COLLECTION_TYPE_ARRAY,
7013  };
7014  struct StackItem
7015  {
7016  COLLECTION_TYPE type;
7017  uint32_t valueCount;
7018  bool singleLineMode;
7019  };
7020 
7021  VmaStringBuilder& m_SB;
7022  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7023  bool m_InsideString;
7024 
7025  void BeginValue(bool isString);
7026  void WriteIndent(bool oneLess = false);
7027 };
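// Usage sketch (a minimal, hypothetical sequence): inside an object, keys and
// values alternate, which BeginValue() below enforces via valueCount parity.
//
//   json.BeginObject();
//   json.WriteString("Size");  // key (even valueCount - must be a string)
//   json.WriteNumber(256u);    // value (odd valueCount - prefixed with ": ")
//   json.EndObject();
//
// emits (INDENT is a single space):
//   {
//    "Size": 256
//   }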
7028 
7029 const char* const VmaJsonWriter::INDENT = " ";
7030 
7031 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7032  m_SB(sb),
7033  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7034  m_InsideString(false)
7035 {
7036 }
7037 
7038 VmaJsonWriter::~VmaJsonWriter()
7039 {
7040  VMA_ASSERT(!m_InsideString);
7041  VMA_ASSERT(m_Stack.empty());
7042 }
7043 
7044 void VmaJsonWriter::BeginObject(bool singleLine)
7045 {
7046  VMA_ASSERT(!m_InsideString);
7047 
7048  BeginValue(false);
7049  m_SB.Add('{');
7050 
7051  StackItem item;
7052  item.type = COLLECTION_TYPE_OBJECT;
7053  item.valueCount = 0;
7054  item.singleLineMode = singleLine;
7055  m_Stack.push_back(item);
7056 }
7057 
7058 void VmaJsonWriter::EndObject()
7059 {
7060  VMA_ASSERT(!m_InsideString);
7061 
7062  WriteIndent(true);
7063  m_SB.Add('}');
7064 
7065  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7066  m_Stack.pop_back();
7067 }
7068 
7069 void VmaJsonWriter::BeginArray(bool singleLine)
7070 {
7071  VMA_ASSERT(!m_InsideString);
7072 
7073  BeginValue(false);
7074  m_SB.Add('[');
7075 
7076  StackItem item;
7077  item.type = COLLECTION_TYPE_ARRAY;
7078  item.valueCount = 0;
7079  item.singleLineMode = singleLine;
7080  m_Stack.push_back(item);
7081 }
7082 
7083 void VmaJsonWriter::EndArray()
7084 {
7085  VMA_ASSERT(!m_InsideString);
7086 
7087  WriteIndent(true);
7088  m_SB.Add(']');
7089 
7090  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7091  m_Stack.pop_back();
7092 }
7093 
7094 void VmaJsonWriter::WriteString(const char* pStr)
7095 {
7096  BeginString(pStr);
7097  EndString();
7098 }
7099 
7100 void VmaJsonWriter::BeginString(const char* pStr)
7101 {
7102  VMA_ASSERT(!m_InsideString);
7103 
7104  BeginValue(true);
7105  m_SB.Add('"');
7106  m_InsideString = true;
7107  if(pStr != VMA_NULL && pStr[0] != '\0')
7108  {
7109  ContinueString(pStr);
7110  }
7111 }
7112 
7113 void VmaJsonWriter::ContinueString(const char* pStr)
7114 {
7115  VMA_ASSERT(m_InsideString);
7116 
7117  const size_t strLen = strlen(pStr);
7118  for(size_t i = 0; i < strLen; ++i)
7119  {
7120  char ch = pStr[i];
7121  if(ch == '\\')
7122  {
7123  m_SB.Add("\\\\");
7124  }
7125  else if(ch == '"')
7126  {
7127  m_SB.Add("\\\"");
7128  }
7129  else if(ch >= 32)
7130  {
7131  m_SB.Add(ch);
7132  }
7133  else switch(ch)
7134  {
7135  case '\b':
7136  m_SB.Add("\\b");
7137  break;
7138  case '\f':
7139  m_SB.Add("\\f");
7140  break;
7141  case '\n':
7142  m_SB.Add("\\n");
7143  break;
7144  case '\r':
7145  m_SB.Add("\\r");
7146  break;
7147  case '\t':
7148  m_SB.Add("\\t");
7149  break;
7150  default:
7151  VMA_ASSERT(0 && "Character not currently supported.");
7152  break;
7153  }
7154  }
7155 }
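// Escaping example (sketch): for an input containing a backslash, a quote, or a
// newline, the loop above emits the two-character JSON escapes \\ , \" and \n
// respectively; any other character below 32 trips the assert, since this writer
// only needs to handle the strings the library itself produces.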
7156 
7157 void VmaJsonWriter::ContinueString(uint32_t n)
7158 {
7159  VMA_ASSERT(m_InsideString);
7160  m_SB.AddNumber(n);
7161 }
7162 
7163 void VmaJsonWriter::ContinueString(uint64_t n)
7164 {
7165  VMA_ASSERT(m_InsideString);
7166  m_SB.AddNumber(n);
7167 }
7168 
7169 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7170 {
7171  VMA_ASSERT(m_InsideString);
7172  m_SB.AddPointer(ptr);
7173 }
7174 
7175 void VmaJsonWriter::EndString(const char* pStr)
7176 {
7177  VMA_ASSERT(m_InsideString);
7178  if(pStr != VMA_NULL && pStr[0] != '\0')
7179  {
7180  ContinueString(pStr);
7181  }
7182  m_SB.Add('"');
7183  m_InsideString = false;
7184 }
7185 
7186 void VmaJsonWriter::WriteNumber(uint32_t n)
7187 {
7188  VMA_ASSERT(!m_InsideString);
7189  BeginValue(false);
7190  m_SB.AddNumber(n);
7191 }
7192 
7193 void VmaJsonWriter::WriteNumber(uint64_t n)
7194 {
7195  VMA_ASSERT(!m_InsideString);
7196  BeginValue(false);
7197  m_SB.AddNumber(n);
7198 }
7199 
7200 void VmaJsonWriter::WriteBool(bool b)
7201 {
7202  VMA_ASSERT(!m_InsideString);
7203  BeginValue(false);
7204  m_SB.Add(b ? "true" : "false");
7205 }
7206 
7207 void VmaJsonWriter::WriteNull()
7208 {
7209  VMA_ASSERT(!m_InsideString);
7210  BeginValue(false);
7211  m_SB.Add("null");
7212 }
7213 
7214 void VmaJsonWriter::BeginValue(bool isString)
7215 {
7216  if(!m_Stack.empty())
7217  {
7218  StackItem& currItem = m_Stack.back();
7219  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7220  currItem.valueCount % 2 == 0)
7221  {
7222  VMA_ASSERT(isString);
7223  }
7224 
7225  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7226  currItem.valueCount % 2 != 0)
7227  {
7228  m_SB.Add(": ");
7229  }
7230  else if(currItem.valueCount > 0)
7231  {
7232  m_SB.Add(", ");
7233  WriteIndent();
7234  }
7235  else
7236  {
7237  WriteIndent();
7238  }
7239  ++currItem.valueCount;
7240  }
7241 }
7242 
7243 void VmaJsonWriter::WriteIndent(bool oneLess)
7244 {
7245  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7246  {
7247  m_SB.AddNewLine();
7248 
7249  size_t count = m_Stack.size();
7250  if(count > 0 && oneLess)
7251  {
7252  --count;
7253  }
7254  for(size_t i = 0; i < count; ++i)
7255  {
7256  m_SB.Add(INDENT);
7257  }
7258  }
7259 }
7260 
7261 #endif // #if VMA_STATS_STRING_ENABLED
7262 
7263 ////////////////////////////////////////////////////////////////////////////////
7264 
7265 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7266 {
7267  if(IsUserDataString())
7268  {
7269  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7270 
7271  FreeUserDataString(hAllocator);
7272 
7273  if(pUserData != VMA_NULL)
7274  {
7275  const char* const newStrSrc = (char*)pUserData;
7276  const size_t newStrLen = strlen(newStrSrc);
7277  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7278  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7279  m_pUserData = newStrDst;
7280  }
7281  }
7282  else
7283  {
7284  m_pUserData = pUserData;
7285  }
7286 }
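// Usage sketch via the public API (`allocator`, `alloc` and `textureIndex` are
// hypothetical): when the allocation was created with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, the string is copied here into
// an internally owned buffer, so passing a temporary is safe:
//
//   char name[64];
//   snprintf(name, sizeof(name), "texture %u", textureIndex);
//   vmaSetAllocationUserData(allocator, alloc, name);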
7287 
7288 void VmaAllocation_T::ChangeBlockAllocation(
7289  VmaAllocator hAllocator,
7290  VmaDeviceMemoryBlock* block,
7291  VkDeviceSize offset)
7292 {
7293  VMA_ASSERT(block != VMA_NULL);
7294  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7295 
7296  // Move mapping reference counter from old block to new block.
7297  if(block != m_BlockAllocation.m_Block)
7298  {
7299  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7300  if(IsPersistentMap())
7301  ++mapRefCount;
7302  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7303  block->Map(hAllocator, mapRefCount, VMA_NULL);
7304  }
7305 
7306  m_BlockAllocation.m_Block = block;
7307  m_BlockAllocation.m_Offset = offset;
7308 }
7309 
7310 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7311 {
7312  VMA_ASSERT(newSize > 0);
7313  m_Size = newSize;
7314 }
7315 
7316 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7317 {
7318  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7319  m_BlockAllocation.m_Offset = newOffset;
7320 }
7321 
7322 VkDeviceSize VmaAllocation_T::GetOffset() const
7323 {
7324  switch(m_Type)
7325  {
7326  case ALLOCATION_TYPE_BLOCK:
7327  return m_BlockAllocation.m_Offset;
7328  case ALLOCATION_TYPE_DEDICATED:
7329  return 0;
7330  default:
7331  VMA_ASSERT(0);
7332  return 0;
7333  }
7334 }
7335 
7336 VkDeviceMemory VmaAllocation_T::GetMemory() const
7337 {
7338  switch(m_Type)
7339  {
7340  case ALLOCATION_TYPE_BLOCK:
7341  return m_BlockAllocation.m_Block->GetDeviceMemory();
7342  case ALLOCATION_TYPE_DEDICATED:
7343  return m_DedicatedAllocation.m_hMemory;
7344  default:
7345  VMA_ASSERT(0);
7346  return VK_NULL_HANDLE;
7347  }
7348 }
7349 
7350 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7351 {
7352  switch(m_Type)
7353  {
7354  case ALLOCATION_TYPE_BLOCK:
7355  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7356  case ALLOCATION_TYPE_DEDICATED:
7357  return m_DedicatedAllocation.m_MemoryTypeIndex;
7358  default:
7359  VMA_ASSERT(0);
7360  return UINT32_MAX;
7361  }
7362 }
7363 
7364 void* VmaAllocation_T::GetMappedData() const
7365 {
7366  switch(m_Type)
7367  {
7368  case ALLOCATION_TYPE_BLOCK:
7369  if(m_MapCount != 0)
7370  {
7371  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7372  VMA_ASSERT(pBlockData != VMA_NULL);
7373  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7374  }
7375  else
7376  {
7377  return VMA_NULL;
7378  }
7379  break;
7380  case ALLOCATION_TYPE_DEDICATED:
7381  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7382  return m_DedicatedAllocation.m_pMappedData;
7383  default:
7384  VMA_ASSERT(0);
7385  return VMA_NULL;
7386  }
7387 }
7388 
7389 bool VmaAllocation_T::CanBecomeLost() const
7390 {
7391  switch(m_Type)
7392  {
7393  case ALLOCATION_TYPE_BLOCK:
7394  return m_BlockAllocation.m_CanBecomeLost;
7395  case ALLOCATION_TYPE_DEDICATED:
7396  return false;
7397  default:
7398  VMA_ASSERT(0);
7399  return false;
7400  }
7401 }
7402 
7403 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7404 {
7405  VMA_ASSERT(CanBecomeLost());
7406 
7407  /*
7408  Warning: This is a carefully designed algorithm.
7409  Do not modify unless you really know what you're doing :)
7410  */
7411  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7412  for(;;)
7413  {
7414  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7415  {
7416  VMA_ASSERT(0);
7417  return false;
7418  }
7419  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7420  {
7421  return false;
7422  }
7423  else // Last use time earlier than current time.
7424  {
7425  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7426  {
7427  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7428  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7429  return true;
7430  }
7431  }
7432  }
7433 }
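// Note on the loop above (assuming CompareExchangeLastUseFrameIndex wraps
// std::atomic compare_exchange_weak): on failure, localLastUseFrameIndex is
// refreshed with the value another thread stored, and the loop re-evaluates the
// lost / still-in-use conditions before retrying, so the transition to
// VMA_FRAME_INDEX_LOST happens exactly once.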
7434 
7435 #if VMA_STATS_STRING_ENABLED
7436 
7437 // These names correspond to the values of enum VmaSuballocationType.
7438 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7439  "FREE",
7440  "UNKNOWN",
7441  "BUFFER",
7442  "IMAGE_UNKNOWN",
7443  "IMAGE_LINEAR",
7444  "IMAGE_OPTIMAL",
7445 };
7446 
7447 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7448 {
7449  json.WriteString("Type");
7450  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7451 
7452  json.WriteString("Size");
7453  json.WriteNumber(m_Size);
7454 
7455  if(m_pUserData != VMA_NULL)
7456  {
7457  json.WriteString("UserData");
7458  if(IsUserDataString())
7459  {
7460  json.WriteString((const char*)m_pUserData);
7461  }
7462  else
7463  {
7464  json.BeginString();
7465  json.ContinueString_Pointer(m_pUserData);
7466  json.EndString();
7467  }
7468  }
7469 
7470  json.WriteString("CreationFrameIndex");
7471  json.WriteNumber(m_CreationFrameIndex);
7472 
7473  json.WriteString("LastUseFrameIndex");
7474  json.WriteNumber(GetLastUseFrameIndex());
7475 
7476  if(m_BufferImageUsage != 0)
7477  {
7478  json.WriteString("Usage");
7479  json.WriteNumber(m_BufferImageUsage);
7480  }
7481 }
7482 
7483 #endif
7484 
7485 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7486 {
7487  VMA_ASSERT(IsUserDataString());
7488  if(m_pUserData != VMA_NULL)
7489  {
7490  char* const oldStr = (char*)m_pUserData;
7491  const size_t oldStrLen = strlen(oldStr);
7492  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7493  m_pUserData = VMA_NULL;
7494  }
7495 }
7496 
7497 void VmaAllocation_T::BlockAllocMap()
7498 {
7499  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7500 
7501  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7502  {
7503  ++m_MapCount;
7504  }
7505  else
7506  {
7507  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7508  }
7509 }
7510 
7511 void VmaAllocation_T::BlockAllocUnmap()
7512 {
7513  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7514 
7515  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7516  {
7517  --m_MapCount;
7518  }
7519  else
7520  {
7521  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7522  }
7523 }
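// Note on m_MapCount (assuming MAP_COUNT_FLAG_PERSISTENT_MAP occupies the top
// bit): the low 7 bits form a reference counter limited to 0x7F, while the flag
// bit marks allocations created persistently mapped, so paired Map/Unmap calls
// never disturb the persistent mapping itself.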
7524 
7525 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7526 {
7527  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7528 
7529  if(m_MapCount != 0)
7530  {
7531  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7532  {
7533  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7534  *ppData = m_DedicatedAllocation.m_pMappedData;
7535  ++m_MapCount;
7536  return VK_SUCCESS;
7537  }
7538  else
7539  {
7540  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7541  return VK_ERROR_MEMORY_MAP_FAILED;
7542  }
7543  }
7544  else
7545  {
7546  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7547  hAllocator->m_hDevice,
7548  m_DedicatedAllocation.m_hMemory,
7549  0, // offset
7550  VK_WHOLE_SIZE,
7551  0, // flags
7552  ppData);
7553  if(result == VK_SUCCESS)
7554  {
7555  m_DedicatedAllocation.m_pMappedData = *ppData;
7556  m_MapCount = 1;
7557  }
7558  return result;
7559  }
7560 }
7561 
7562 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7563 {
7564  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7565 
7566  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7567  {
7568  --m_MapCount;
7569  if(m_MapCount == 0)
7570  {
7571  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7572  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7573  hAllocator->m_hDevice,
7574  m_DedicatedAllocation.m_hMemory);
7575  }
7576  }
7577  else
7578  {
7579  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7580  }
7581 }
7582 
7583 #if VMA_STATS_STRING_ENABLED
7584 
7585 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7586 {
7587  json.BeginObject();
7588 
7589  json.WriteString("Blocks");
7590  json.WriteNumber(stat.blockCount);
7591 
7592  json.WriteString("Allocations");
7593  json.WriteNumber(stat.allocationCount);
7594 
7595  json.WriteString("UnusedRanges");
7596  json.WriteNumber(stat.unusedRangeCount);
7597 
7598  json.WriteString("UsedBytes");
7599  json.WriteNumber(stat.usedBytes);
7600 
7601  json.WriteString("UnusedBytes");
7602  json.WriteNumber(stat.unusedBytes);
7603 
7604  if(stat.allocationCount > 1)
7605  {
7606  json.WriteString("AllocationSize");
7607  json.BeginObject(true);
7608  json.WriteString("Min");
7609  json.WriteNumber(stat.allocationSizeMin);
7610  json.WriteString("Avg");
7611  json.WriteNumber(stat.allocationSizeAvg);
7612  json.WriteString("Max");
7613  json.WriteNumber(stat.allocationSizeMax);
7614  json.EndObject();
7615  }
7616 
7617  if(stat.unusedRangeCount > 1)
7618  {
7619  json.WriteString("UnusedRangeSize");
7620  json.BeginObject(true);
7621  json.WriteString("Min");
7622  json.WriteNumber(stat.unusedRangeSizeMin);
7623  json.WriteString("Avg");
7624  json.WriteNumber(stat.unusedRangeSizeAvg);
7625  json.WriteString("Max");
7626  json.WriteNumber(stat.unusedRangeSizeMax);
7627  json.EndObject();
7628  }
7629 
7630  json.EndObject();
7631 }
7632 
7633 #endif // #if VMA_STATS_STRING_ENABLED
7634 
7635 struct VmaSuballocationItemSizeLess
7636 {
7637  bool operator()(
7638  const VmaSuballocationList::iterator lhs,
7639  const VmaSuballocationList::iterator rhs) const
7640  {
7641  return lhs->size < rhs->size;
7642  }
7643  bool operator()(
7644  const VmaSuballocationList::iterator lhs,
7645  VkDeviceSize rhsSize) const
7646  {
7647  return lhs->size < rhsSize;
7648  }
7649 };
7650 
7651 
7652 ////////////////////////////////////////////////////////////////////////////////
7653 // class VmaBlockMetadata
7654 
7655 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7656  m_Size(0),
7657  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7658 {
7659 }
7660 
7661 #if VMA_STATS_STRING_ENABLED
7662 
7663 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7664  VkDeviceSize unusedBytes,
7665  size_t allocationCount,
7666  size_t unusedRangeCount) const
7667 {
7668  json.BeginObject();
7669 
7670  json.WriteString("TotalBytes");
7671  json.WriteNumber(GetSize());
7672 
7673  json.WriteString("UnusedBytes");
7674  json.WriteNumber(unusedBytes);
7675 
7676  json.WriteString("Allocations");
7677  json.WriteNumber((uint64_t)allocationCount);
7678 
7679  json.WriteString("UnusedRanges");
7680  json.WriteNumber((uint64_t)unusedRangeCount);
7681 
7682  json.WriteString("Suballocations");
7683  json.BeginArray();
7684 }
7685 
7686 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7687  VkDeviceSize offset,
7688  VmaAllocation hAllocation) const
7689 {
7690  json.BeginObject(true);
7691 
7692  json.WriteString("Offset");
7693  json.WriteNumber(offset);
7694 
7695  hAllocation->PrintParameters(json);
7696 
7697  json.EndObject();
7698 }
7699 
7700 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7701  VkDeviceSize offset,
7702  VkDeviceSize size) const
7703 {
7704  json.BeginObject(true);
7705 
7706  json.WriteString("Offset");
7707  json.WriteNumber(offset);
7708 
7709  json.WriteString("Type");
7710  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7711 
7712  json.WriteString("Size");
7713  json.WriteNumber(size);
7714 
7715  json.EndObject();
7716 }
7717 
7718 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7719 {
7720  json.EndArray();
7721  json.EndObject();
7722 }
7723 
7724 #endif // #if VMA_STATS_STRING_ENABLED
7725 
7726 ////////////////////////////////////////////////////////////////////////////////
7727 // class VmaBlockMetadata_Generic
7728 
7729 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7730  VmaBlockMetadata(hAllocator),
7731  m_FreeCount(0),
7732  m_SumFreeSize(0),
7733  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7734  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7735 {
7736 }
7737 
7738 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7739 {
7740 }
7741 
7742 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7743 {
7744  VmaBlockMetadata::Init(size);
7745 
7746  m_FreeCount = 1;
7747  m_SumFreeSize = size;
7748 
7749  VmaSuballocation suballoc = {};
7750  suballoc.offset = 0;
7751  suballoc.size = size;
7752  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7753  suballoc.hAllocation = VK_NULL_HANDLE;
7754 
7755  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7756  m_Suballocations.push_back(suballoc);
7757  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7758  --suballocItem;
7759  m_FreeSuballocationsBySize.push_back(suballocItem);
7760 }
7761 
7762 bool VmaBlockMetadata_Generic::Validate() const
7763 {
7764  VMA_VALIDATE(!m_Suballocations.empty());
7765 
7766  // Expected offset of new suballocation as calculated from previous ones.
7767  VkDeviceSize calculatedOffset = 0;
7768  // Expected number of free suballocations as calculated from traversing their list.
7769  uint32_t calculatedFreeCount = 0;
7770  // Expected sum size of free suballocations as calculated from traversing their list.
7771  VkDeviceSize calculatedSumFreeSize = 0;
7772  // Expected number of free suballocations that should be registered in
7773  // m_FreeSuballocationsBySize calculated from traversing their list.
7774  size_t freeSuballocationsToRegister = 0;
7775  // True if previous visited suballocation was free.
7776  bool prevFree = false;
7777 
7778  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7779  suballocItem != m_Suballocations.cend();
7780  ++suballocItem)
7781  {
7782  const VmaSuballocation& subAlloc = *suballocItem;
7783 
7784  // Actual offset of this suballocation doesn't match expected one.
7785  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7786 
7787  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7788  // Two adjacent free suballocations are invalid. They should be merged.
7789  VMA_VALIDATE(!prevFree || !currFree);
7790 
7791  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7792 
7793  if(currFree)
7794  {
7795  calculatedSumFreeSize += subAlloc.size;
7796  ++calculatedFreeCount;
7797  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7798  {
7799  ++freeSuballocationsToRegister;
7800  }
7801 
7802  // Margin required between allocations - every free space must be at least that large.
7803  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7804  }
7805  else
7806  {
7807  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7808  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7809 
7810  // Margin required between allocations - previous allocation must be free.
7811  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7812  }
7813 
7814  calculatedOffset += subAlloc.size;
7815  prevFree = currFree;
7816  }
7817 
7818  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7819  // match expected one.
7820  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7821 
7822  VkDeviceSize lastSize = 0;
7823  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7824  {
7825  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7826 
7827  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7828  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7829  // They must be sorted by size ascending.
7830  VMA_VALIDATE(suballocItem->size >= lastSize);
7831 
7832  lastSize = suballocItem->size;
7833  }
7834 
7835  // Check if totals match calculated values.
7836  VMA_VALIDATE(ValidateFreeSuballocationList());
7837  VMA_VALIDATE(calculatedOffset == GetSize());
7838  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7839  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7840 
7841  return true;
7842 }
7843 
7844 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7845 {
7846  if(!m_FreeSuballocationsBySize.empty())
7847  {
7848  return m_FreeSuballocationsBySize.back()->size;
7849  }
7850  else
7851  {
7852  return 0;
7853  }
7854 }
7855 
7856 bool VmaBlockMetadata_Generic::IsEmpty() const
7857 {
7858  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7859 }
7860 
7861 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7862 {
7863  outInfo.blockCount = 1;
7864 
7865  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7866  outInfo.allocationCount = rangeCount - m_FreeCount;
7867  outInfo.unusedRangeCount = m_FreeCount;
7868 
7869  outInfo.unusedBytes = m_SumFreeSize;
7870  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7871 
7872  outInfo.allocationSizeMin = UINT64_MAX;
7873  outInfo.allocationSizeMax = 0;
7874  outInfo.unusedRangeSizeMin = UINT64_MAX;
7875  outInfo.unusedRangeSizeMax = 0;
7876 
7877  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7878  suballocItem != m_Suballocations.cend();
7879  ++suballocItem)
7880  {
7881  const VmaSuballocation& suballoc = *suballocItem;
7882  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7883  {
7884  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7885  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7886  }
7887  else
7888  {
7889  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7890  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7891  }
7892  }
7893 }
7894 
7895 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7896 {
7897  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7898 
7899  inoutStats.size += GetSize();
7900  inoutStats.unusedSize += m_SumFreeSize;
7901  inoutStats.allocationCount += rangeCount - m_FreeCount;
7902  inoutStats.unusedRangeCount += m_FreeCount;
7903  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7904 }
7905 
7906 #if VMA_STATS_STRING_ENABLED
7907 
7908 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7909 {
7910  PrintDetailedMap_Begin(json,
7911  m_SumFreeSize, // unusedBytes
7912  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7913  m_FreeCount); // unusedRangeCount
7914 
7915  size_t i = 0;
7916  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7917  suballocItem != m_Suballocations.cend();
7918  ++suballocItem, ++i)
7919  {
7920  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7921  {
7922  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7923  }
7924  else
7925  {
7926  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7927  }
7928  }
7929 
7930  PrintDetailedMap_End(json);
7931 }
7932 
7933 #endif // #if VMA_STATS_STRING_ENABLED
7934 
7935 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7936  uint32_t currentFrameIndex,
7937  uint32_t frameInUseCount,
7938  VkDeviceSize bufferImageGranularity,
7939  VkDeviceSize allocSize,
7940  VkDeviceSize allocAlignment,
7941  bool upperAddress,
7942  VmaSuballocationType allocType,
7943  bool canMakeOtherLost,
7944  uint32_t strategy,
7945  VmaAllocationRequest* pAllocationRequest)
7946 {
7947  VMA_ASSERT(allocSize > 0);
7948  VMA_ASSERT(!upperAddress);
7949  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7950  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7951  VMA_HEAVY_ASSERT(Validate());
7952 
7953  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7954 
7955  // There is not enough total free space in this block to fulfill the request: early return.
7956  if(canMakeOtherLost == false &&
7957  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7958  {
7959  return false;
7960  }
7961 
7962  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
7963  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7964  if(freeSuballocCount > 0)
7965  {
7967  {
7968  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7969  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7970  m_FreeSuballocationsBySize.data(),
7971  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7972  allocSize + 2 * VMA_DEBUG_MARGIN,
7973  VmaSuballocationItemSizeLess());
7974  size_t index = it - m_FreeSuballocationsBySize.data();
7975  for(; index < freeSuballocCount; ++index)
7976  {
7977  if(CheckAllocation(
7978  currentFrameIndex,
7979  frameInUseCount,
7980  bufferImageGranularity,
7981  allocSize,
7982  allocAlignment,
7983  allocType,
7984  m_FreeSuballocationsBySize[index],
7985  false, // canMakeOtherLost
7986  &pAllocationRequest->offset,
7987  &pAllocationRequest->itemsToMakeLostCount,
7988  &pAllocationRequest->sumFreeSize,
7989  &pAllocationRequest->sumItemSize))
7990  {
7991  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7992  return true;
7993  }
7994  }
7995  }
7996  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7997  {
7998  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7999  it != m_Suballocations.end();
8000  ++it)
8001  {
8002  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8003  currentFrameIndex,
8004  frameInUseCount,
8005  bufferImageGranularity,
8006  allocSize,
8007  allocAlignment,
8008  allocType,
8009  it,
8010  false, // canMakeOtherLost
8011  &pAllocationRequest->offset,
8012  &pAllocationRequest->itemsToMakeLostCount,
8013  &pAllocationRequest->sumFreeSize,
8014  &pAllocationRequest->sumItemSize))
8015  {
8016  pAllocationRequest->item = it;
8017  return true;
8018  }
8019  }
8020  }
8021  else // WORST_FIT, FIRST_FIT
8022  {
8023  // Search starting from the biggest suballocations.
8024  for(size_t index = freeSuballocCount; index--; )
8025  {
8026  if(CheckAllocation(
8027  currentFrameIndex,
8028  frameInUseCount,
8029  bufferImageGranularity,
8030  allocSize,
8031  allocAlignment,
8032  allocType,
8033  m_FreeSuballocationsBySize[index],
8034  false, // canMakeOtherLost
8035  &pAllocationRequest->offset,
8036  &pAllocationRequest->itemsToMakeLostCount,
8037  &pAllocationRequest->sumFreeSize,
8038  &pAllocationRequest->sumItemSize))
8039  {
8040  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8041  return true;
8042  }
8043  }
8044  }
8045  }
8046 
8047  if(canMakeOtherLost)
8048  {
8049  // Brute-force algorithm. TODO: Come up with something better.
8050 
8051  bool found = false;
8052  VmaAllocationRequest tmpAllocRequest = {};
8053  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8054  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8055  suballocIt != m_Suballocations.end();
8056  ++suballocIt)
8057  {
8058  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8059  suballocIt->hAllocation->CanBecomeLost())
8060  {
8061  if(CheckAllocation(
8062  currentFrameIndex,
8063  frameInUseCount,
8064  bufferImageGranularity,
8065  allocSize,
8066  allocAlignment,
8067  allocType,
8068  suballocIt,
8069  canMakeOtherLost,
8070  &tmpAllocRequest.offset,
8071  &tmpAllocRequest.itemsToMakeLostCount,
8072  &tmpAllocRequest.sumFreeSize,
8073  &tmpAllocRequest.sumItemSize))
8074  {
8075  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8076  {
8077  *pAllocationRequest = tmpAllocRequest;
8078  pAllocationRequest->item = suballocIt;
8079  break;
8080  }
8081  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8082  {
8083  *pAllocationRequest = tmpAllocRequest;
8084  pAllocationRequest->item = suballocIt;
8085  found = true;
8086  }
8087  }
8088  }
8089  }
8090 
8091  return found;
8092  }
8093 
8094  return false;
8095 }
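// The strategy bits tested above come from the public allocation flags. A usage
// sketch (`allocCreateInfo` is a hypothetical variable):
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
//   // or VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT /
//   //    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT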
8096 
8097 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8098  uint32_t currentFrameIndex,
8099  uint32_t frameInUseCount,
8100  VmaAllocationRequest* pAllocationRequest)
8101 {
8102  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8103 
8104  while(pAllocationRequest->itemsToMakeLostCount > 0)
8105  {
8106  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8107  {
8108  ++pAllocationRequest->item;
8109  }
8110  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8111  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8112  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8113  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8114  {
8115  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8116  --pAllocationRequest->itemsToMakeLostCount;
8117  }
8118  else
8119  {
8120  return false;
8121  }
8122  }
8123 
8124  VMA_HEAVY_ASSERT(Validate());
8125  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8126  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8127 
8128  return true;
8129 }
8130 
8131 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8132 {
8133  uint32_t lostAllocationCount = 0;
8134  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8135  it != m_Suballocations.end();
8136  ++it)
8137  {
8138  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8139  it->hAllocation->CanBecomeLost() &&
8140  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8141  {
8142  it = FreeSuballocation(it);
8143  ++lostAllocationCount;
8144  }
8145  }
8146  return lostAllocationCount;
8147 }
8148 
8149 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8150 {
8151  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8152  it != m_Suballocations.end();
8153  ++it)
8154  {
8155  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8156  {
8157  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8158  {
8159  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8160  return VK_ERROR_VALIDATION_FAILED_EXT;
8161  }
8162  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8163  {
8164  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8165  return VK_ERROR_VALIDATION_FAILED_EXT;
8166  }
8167  }
8168  }
8169 
8170  return VK_SUCCESS;
8171 }
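// Sketch of how this check is enabled from user code (these configuration macros
// must be defined before including this header so the magic values are written):
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//   #include "vk_mem_alloc.h"
//   ...
//   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);  // all memory types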
8172 
8173 void VmaBlockMetadata_Generic::Alloc(
8174  const VmaAllocationRequest& request,
8175  VmaSuballocationType type,
8176  VkDeviceSize allocSize,
8177  VmaAllocation hAllocation)
8178 {
8179  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8180  VMA_ASSERT(request.item != m_Suballocations.end());
8181  VmaSuballocation& suballoc = *request.item;
8182  // Given suballocation is a free block.
8183  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8184  // Given offset is inside this suballocation.
8185  VMA_ASSERT(request.offset >= suballoc.offset);
8186  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8187  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8188  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8189 
8190  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8191  // it to become used.
8192  UnregisterFreeSuballocation(request.item);
8193 
8194  suballoc.offset = request.offset;
8195  suballoc.size = allocSize;
8196  suballoc.type = type;
8197  suballoc.hAllocation = hAllocation;
8198 
8199  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8200  if(paddingEnd)
8201  {
8202  VmaSuballocation paddingSuballoc = {};
8203  paddingSuballoc.offset = request.offset + allocSize;
8204  paddingSuballoc.size = paddingEnd;
8205  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8206  VmaSuballocationList::iterator next = request.item;
8207  ++next;
8208  const VmaSuballocationList::iterator paddingEndItem =
8209  m_Suballocations.insert(next, paddingSuballoc);
8210  RegisterFreeSuballocation(paddingEndItem);
8211  }
8212 
8213  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8214  if(paddingBegin)
8215  {
8216  VmaSuballocation paddingSuballoc = {};
8217  paddingSuballoc.offset = request.offset - paddingBegin;
8218  paddingSuballoc.size = paddingBegin;
8219  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8220  const VmaSuballocationList::iterator paddingBeginItem =
8221  m_Suballocations.insert(request.item, paddingSuballoc);
8222  RegisterFreeSuballocation(paddingBeginItem);
8223  }
8224 
8225  // Update totals.
8226  m_FreeCount = m_FreeCount - 1;
8227  if(paddingBegin > 0)
8228  {
8229  ++m_FreeCount;
8230  }
8231  if(paddingEnd > 0)
8232  {
8233  ++m_FreeCount;
8234  }
8235  m_SumFreeSize -= allocSize;
8236 }
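// Resulting layout (sketch) when the request lands inside a larger free range:
//
//   before: |------------------- FREE -------------------|
//   after:  |-paddingBegin-|---- allocation ----|-paddingEnd-|
//
// Both paddings become new FREE suballocations; they are re-registered in
// m_FreeSuballocationsBySize only when at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes large (see RegisterFreeSuballocation).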
8237 
8238 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8239 {
8240  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8241  suballocItem != m_Suballocations.end();
8242  ++suballocItem)
8243  {
8244  VmaSuballocation& suballoc = *suballocItem;
8245  if(suballoc.hAllocation == allocation)
8246  {
8247  FreeSuballocation(suballocItem);
8248  VMA_HEAVY_ASSERT(Validate());
8249  return;
8250  }
8251  }
8252  VMA_ASSERT(0 && "Not found!");
8253 }
8254 
8255 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8256 {
8257  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8258  suballocItem != m_Suballocations.end();
8259  ++suballocItem)
8260  {
8261  VmaSuballocation& suballoc = *suballocItem;
8262  if(suballoc.offset == offset)
8263  {
8264  FreeSuballocation(suballocItem);
8265  return;
8266  }
8267  }
8268  VMA_ASSERT(0 && "Not found!");
8269 }
8270 
8271 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8272 {
8273  typedef VmaSuballocationList::iterator iter_type;
8274  for(iter_type suballocItem = m_Suballocations.begin();
8275  suballocItem != m_Suballocations.end();
8276  ++suballocItem)
8277  {
8278  VmaSuballocation& suballoc = *suballocItem;
8279  if(suballoc.hAllocation == alloc)
8280  {
8281  iter_type nextItem = suballocItem;
8282  ++nextItem;
8283 
8284  // Should have been ensured at a higher level.
8285  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8286 
8287  // Shrinking.
8288  if(newSize < alloc->GetSize())
8289  {
8290  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8291 
8292  // There is next item.
8293  if(nextItem != m_Suballocations.end())
8294  {
8295  // Next item is free.
8296  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8297  {
8298  // Grow this next item backward.
8299  UnregisterFreeSuballocation(nextItem);
8300  nextItem->offset -= sizeDiff;
8301  nextItem->size += sizeDiff;
8302  RegisterFreeSuballocation(nextItem);
8303  }
8304  // Next item is not free.
8305  else
8306  {
8307  // Create free item after current one.
8308  VmaSuballocation newFreeSuballoc;
8309  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8310  newFreeSuballoc.offset = suballoc.offset + newSize;
8311  newFreeSuballoc.size = sizeDiff;
8312  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8313  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8314  RegisterFreeSuballocation(newFreeSuballocIt);
8315 
8316  ++m_FreeCount;
8317  }
8318  }
8319  // This is the last item.
8320  else
8321  {
8322  // Create free item at the end.
8323  VmaSuballocation newFreeSuballoc;
8324  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8325  newFreeSuballoc.offset = suballoc.offset + newSize;
8326  newFreeSuballoc.size = sizeDiff;
8327  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8328  m_Suballocations.push_back(newFreeSuballoc);
8329 
8330  iter_type newFreeSuballocIt = m_Suballocations.end();
8331  RegisterFreeSuballocation(--newFreeSuballocIt);
8332 
8333  ++m_FreeCount;
8334  }
8335 
8336  suballoc.size = newSize;
8337  m_SumFreeSize += sizeDiff;
8338  }
8339  // Growing.
8340  else
8341  {
8342  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8343 
8344  // There is next item.
8345  if(nextItem != m_Suballocations.end())
8346  {
8347  // Next item is free.
8348  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8349  {
8350  // There is not enough free space, including margin.
8351  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8352  {
8353  return false;
8354  }
8355 
8356  // There is more free space than required.
8357  if(nextItem->size > sizeDiff)
8358  {
8359  // Move and shrink this next item.
8360  UnregisterFreeSuballocation(nextItem);
8361  nextItem->offset += sizeDiff;
8362  nextItem->size -= sizeDiff;
8363  RegisterFreeSuballocation(nextItem);
8364  }
8365  // There is exactly the amount of free space required.
8366  else
8367  {
8368  // Remove this next free item.
8369  UnregisterFreeSuballocation(nextItem);
8370  m_Suballocations.erase(nextItem);
8371  --m_FreeCount;
8372  }
8373  }
8374  // Next item is not free - there is no space to grow.
8375  else
8376  {
8377  return false;
8378  }
8379  }
8380  // This is the last item - there is no space to grow.
8381  else
8382  {
8383  return false;
8384  }
8385 
8386  suballoc.size = newSize;
8387  m_SumFreeSize -= sizeDiff;
8388  }
8389 
8390  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
8391  return true;
8392  }
8393  }
8394  VMA_ASSERT(0 && "Not found!");
8395  return false;
8396 }
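// Worked example of the shrinking path above, with illustrative numbers:
// shrinking an allocation A at offset 0 from size 100 to newSize 60, when
// the following item B is not free, inserts a new free suballocation of
// size 40 and increments m_FreeCount:
//
//   before: [A off=0 size=100][B off=100 ...]
//   after:  [A off=0 size=60][FREE off=60 size=40][B off=100 ...]
//
// Growing succeeds only when the next item is free and large enough
// (including VMA_DEBUG_MARGIN); that free neighbor is then either moved
// and shrunk, or erased entirely when it matches the needed size exactly.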
8397 
8398 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8399 {
8400  VkDeviceSize lastSize = 0;
8401  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8402  {
8403  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8404 
8405  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8406  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8407  VMA_VALIDATE(it->size >= lastSize);
8408  lastSize = it->size;
8409  }
8410  return true;
8411 }
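// The loop above is equivalent to checking that m_FreeSuballocationsBySize
// is sorted ascending by size (with all entries free and large enough to
// register). A sketch of the same invariant expressed with the standard
// library, illustrative only; 'first' would point at
// m_FreeSuballocationsBySize.data() and 'count' would be its size():
//
// \code
// #include <algorithm>
// #include <cstddef>
//
// bool IsSortedBySize(const VmaSuballocationList::iterator* first, size_t count)
// {
//     return std::is_sorted(first, first + count,
//         [](VmaSuballocationList::iterator a, VmaSuballocationList::iterator b)
//         { return a->size < b->size; });
// }
// \endcode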
8412 
8413 bool VmaBlockMetadata_Generic::CheckAllocation(
8414  uint32_t currentFrameIndex,
8415  uint32_t frameInUseCount,
8416  VkDeviceSize bufferImageGranularity,
8417  VkDeviceSize allocSize,
8418  VkDeviceSize allocAlignment,
8419  VmaSuballocationType allocType,
8420  VmaSuballocationList::const_iterator suballocItem,
8421  bool canMakeOtherLost,
8422  VkDeviceSize* pOffset,
8423  size_t* itemsToMakeLostCount,
8424  VkDeviceSize* pSumFreeSize,
8425  VkDeviceSize* pSumItemSize) const
8426 {
8427  VMA_ASSERT(allocSize > 0);
8428  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8429  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8430  VMA_ASSERT(pOffset != VMA_NULL);
8431 
8432  *itemsToMakeLostCount = 0;
8433  *pSumFreeSize = 0;
8434  *pSumItemSize = 0;
8435 
8436  if(canMakeOtherLost)
8437  {
8438  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8439  {
8440  *pSumFreeSize = suballocItem->size;
8441  }
8442  else
8443  {
8444  if(suballocItem->hAllocation->CanBecomeLost() &&
8445  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8446  {
8447  ++*itemsToMakeLostCount;
8448  *pSumItemSize = suballocItem->size;
8449  }
8450  else
8451  {
8452  return false;
8453  }
8454  }
8455 
8456  // Remaining size is too small for this request: Early return.
8457  if(GetSize() - suballocItem->offset < allocSize)
8458  {
8459  return false;
8460  }
8461 
8462  // Start from offset equal to beginning of this suballocation.
8463  *pOffset = suballocItem->offset;
8464 
8465  // Apply VMA_DEBUG_MARGIN at the beginning.
8466  if(VMA_DEBUG_MARGIN > 0)
8467  {
8468  *pOffset += VMA_DEBUG_MARGIN;
8469  }
8470 
8471  // Apply alignment.
8472  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8473 
8474  // Check previous suballocations for BufferImageGranularity conflicts.
8475  // Make bigger alignment if necessary.
8476  if(bufferImageGranularity > 1)
8477  {
8478  bool bufferImageGranularityConflict = false;
8479  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8480  while(prevSuballocItem != m_Suballocations.cbegin())
8481  {
8482  --prevSuballocItem;
8483  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8484  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8485  {
8486  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8487  {
8488  bufferImageGranularityConflict = true;
8489  break;
8490  }
8491  }
8492  else
8493  // Already on previous page.
8494  break;
8495  }
8496  if(bufferImageGranularityConflict)
8497  {
8498  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8499  }
8500  }
8501 
8502  // Now that we have final *pOffset, check if we are past suballocItem.
8503  // If yes, return false - this function should be called for another suballocItem as starting point.
8504  if(*pOffset >= suballocItem->offset + suballocItem->size)
8505  {
8506  return false;
8507  }
8508 
8509  // Calculate padding at the beginning based on current offset.
8510  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8511 
8512  // Calculate required margin at the end.
8513  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8514 
8515  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8516  // Another early return check.
8517  if(suballocItem->offset + totalSize > GetSize())
8518  {
8519  return false;
8520  }
8521 
8522  // Advance lastSuballocItem until desired size is reached.
8523  // Update itemsToMakeLostCount.
8524  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8525  if(totalSize > suballocItem->size)
8526  {
8527  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8528  while(remainingSize > 0)
8529  {
8530  ++lastSuballocItem;
8531  if(lastSuballocItem == m_Suballocations.cend())
8532  {
8533  return false;
8534  }
8535  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8536  {
8537  *pSumFreeSize += lastSuballocItem->size;
8538  }
8539  else
8540  {
8541  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8542  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8543  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8544  {
8545  ++*itemsToMakeLostCount;
8546  *pSumItemSize += lastSuballocItem->size;
8547  }
8548  else
8549  {
8550  return false;
8551  }
8552  }
8553  remainingSize = (lastSuballocItem->size < remainingSize) ?
8554  remainingSize - lastSuballocItem->size : 0;
8555  }
8556  }
8557 
8558  // Check next suballocations for BufferImageGranularity conflicts.
8559  // If conflict exists, we must mark more allocations lost or fail.
8560  if(bufferImageGranularity > 1)
8561  {
8562  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8563  ++nextSuballocItem;
8564  while(nextSuballocItem != m_Suballocations.cend())
8565  {
8566  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8567  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8568  {
8569  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8570  {
8571  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8572  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8573  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8574  {
8575  ++*itemsToMakeLostCount;
8576  }
8577  else
8578  {
8579  return false;
8580  }
8581  }
8582  }
8583  else
8584  {
8585  // Already on next page.
8586  break;
8587  }
8588  ++nextSuballocItem;
8589  }
8590  }
8591  }
8592  else
8593  {
8594  const VmaSuballocation& suballoc = *suballocItem;
8595  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8596 
8597  *pSumFreeSize = suballoc.size;
8598 
8599  // Size of this suballocation is too small for this request: Early return.
8600  if(suballoc.size < allocSize)
8601  {
8602  return false;
8603  }
8604 
8605  // Start from offset equal to beginning of this suballocation.
8606  *pOffset = suballoc.offset;
8607 
8608  // Apply VMA_DEBUG_MARGIN at the beginning.
8609  if(VMA_DEBUG_MARGIN > 0)
8610  {
8611  *pOffset += VMA_DEBUG_MARGIN;
8612  }
8613 
8614  // Apply alignment.
8615  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8616 
8617  // Check previous suballocations for BufferImageGranularity conflicts.
8618  // Make bigger alignment if necessary.
8619  if(bufferImageGranularity > 1)
8620  {
8621  bool bufferImageGranularityConflict = false;
8622  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8623  while(prevSuballocItem != m_Suballocations.cbegin())
8624  {
8625  --prevSuballocItem;
8626  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8627  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8628  {
8629  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8630  {
8631  bufferImageGranularityConflict = true;
8632  break;
8633  }
8634  }
8635  else
8636  // Already on previous page.
8637  break;
8638  }
8639  if(bufferImageGranularityConflict)
8640  {
8641  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8642  }
8643  }
8644 
8645  // Calculate padding at the beginning based on current offset.
8646  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8647 
8648  // Calculate required margin at the end.
8649  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8650 
8651  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8652  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8653  {
8654  return false;
8655  }
8656 
8657  // Check next suballocations for BufferImageGranularity conflicts.
8658  // If conflict exists, allocation cannot be made here.
8659  if(bufferImageGranularity > 1)
8660  {
8661  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8662  ++nextSuballocItem;
8663  while(nextSuballocItem != m_Suballocations.cend())
8664  {
8665  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8666  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8667  {
8668  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8669  {
8670  return false;
8671  }
8672  }
8673  else
8674  {
8675  // Already on next page.
8676  break;
8677  }
8678  ++nextSuballocItem;
8679  }
8680  }
8681  }
8682 
8683  // All tests passed: Success. pOffset is already filled.
8684  return true;
8685 }
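// Example of the offset adjustment performed above, with illustrative
// numbers and VMA_DEBUG_MARGIN == 0. Alignment is a power of two, as
// Vulkan requires, so VmaAlignUp(offset, a) == (offset + a - 1) & ~(a - 1):
//
//   suballoc.offset = 13, allocAlignment = 8
//   *pOffset = VmaAlignUp(13, 8) = (13 + 7) & ~7 = 16
//   paddingBegin = 16 - 13 = 3
//
// The request then fits only if paddingBegin + allocSize + requiredEndMargin
// <= suballoc.size (or, in the canMakeOtherLost path, if enough following
// suballocations can be made lost to cover the total).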
8686 
8687 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8688 {
8689  VMA_ASSERT(item != m_Suballocations.end());
8690  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8691 
8692  VmaSuballocationList::iterator nextItem = item;
8693  ++nextItem;
8694  VMA_ASSERT(nextItem != m_Suballocations.end());
8695  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8696 
8697  item->size += nextItem->size;
8698  --m_FreeCount;
8699  m_Suballocations.erase(nextItem);
8700 }
8701 
8702 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8703 {
8704  // Change this suballocation to be marked as free.
8705  VmaSuballocation& suballoc = *suballocItem;
8706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8707  suballoc.hAllocation = VK_NULL_HANDLE;
8708 
8709  // Update totals.
8710  ++m_FreeCount;
8711  m_SumFreeSize += suballoc.size;
8712 
8713  // Merge with previous and/or next suballocation if it's also free.
8714  bool mergeWithNext = false;
8715  bool mergeWithPrev = false;
8716 
8717  VmaSuballocationList::iterator nextItem = suballocItem;
8718  ++nextItem;
8719  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8720  {
8721  mergeWithNext = true;
8722  }
8723 
8724  VmaSuballocationList::iterator prevItem = suballocItem;
8725  if(suballocItem != m_Suballocations.begin())
8726  {
8727  --prevItem;
8728  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8729  {
8730  mergeWithPrev = true;
8731  }
8732  }
8733 
8734  if(mergeWithNext)
8735  {
8736  UnregisterFreeSuballocation(nextItem);
8737  MergeFreeWithNext(suballocItem);
8738  }
8739 
8740  if(mergeWithPrev)
8741  {
8742  UnregisterFreeSuballocation(prevItem);
8743  MergeFreeWithNext(prevItem);
8744  RegisterFreeSuballocation(prevItem);
8745  return prevItem;
8746  }
8747  else
8748  {
8749  RegisterFreeSuballocation(suballocItem);
8750  return suballocItem;
8751  }
8752 }
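// The merge logic above keeps the invariant that the list never contains
// two adjacent free suballocations. Worked sketch of the four possible
// neighbor configurations when freeing X:
//
//   [used][X][used] -> [used][FREE][used]         (no merge)
//   [used][X][FREE] -> [used][FREE.........]      (merge with next)
//   [FREE][X][used] -> [FREE.........][used]      (merge with prev)
//   [FREE][X][FREE] -> [FREE..................]   (merge with both)
//
// In every case the function returns an iterator to the single resulting
// free item, already re-registered in m_FreeSuballocationsBySize.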
8753 
8754 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8755 {
8756  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8757  VMA_ASSERT(item->size > 0);
8758 
8759  // You may want to enable this validation at the beginning or at the end of
8760  // this function, depending on what you want to check.
8761  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8762 
8763  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8764  {
8765  if(m_FreeSuballocationsBySize.empty())
8766  {
8767  m_FreeSuballocationsBySize.push_back(item);
8768  }
8769  else
8770  {
8771  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8772  }
8773  }
8774 
8775  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8776 }
8777 
8778 
8779 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8780 {
8781  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8782  VMA_ASSERT(item->size > 0);
8783 
8784  // You may want to enable this validation at the beginning or at the end of
8785  // this function, depending on what you want to check.
8786  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8787 
8788  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8789  {
8790  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8791  m_FreeSuballocationsBySize.data(),
8792  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8793  item,
8794  VmaSuballocationItemSizeLess());
8795  for(size_t index = it - m_FreeSuballocationsBySize.data();
8796  index < m_FreeSuballocationsBySize.size();
8797  ++index)
8798  {
8799  if(m_FreeSuballocationsBySize[index] == item)
8800  {
8801  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8802  return;
8803  }
8804  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8805  }
8806  VMA_ASSERT(0 && "Not found.");
8807  }
8808 
8809  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8810 }
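// VmaBinaryFindFirstNotLess behaves like std::lower_bound: it returns a
// pointer to the first entry whose size is not less than item->size.
// Because several free items may have equal sizes, the loop above then
// scans forward through the run of equal-sized entries until it finds the
// exact iterator. Equivalent sketch with the standard library
// (illustrative only, using std::vector in place of VmaVector):
//
// \code
// #include <algorithm>
// #include <vector>
//
// void Unregister(std::vector<VmaSuballocationList::iterator>& v,
//     VmaSuballocationList::iterator item)
// {
//     std::vector<VmaSuballocationList::iterator>::iterator it =
//         std::lower_bound(v.begin(), v.end(), item,
//             [](VmaSuballocationList::iterator a, VmaSuballocationList::iterator b)
//             { return a->size < b->size; });
//     while(it != v.end() && (*it)->size == item->size)
//     {
//         if(*it == item) { v.erase(it); return; }
//         ++it;
//     }
// }
// \endcode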
8811 
8812 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8813  VkDeviceSize bufferImageGranularity,
8814  VmaSuballocationType& inOutPrevSuballocType) const
8815 {
8816  if(bufferImageGranularity == 1 || IsEmpty())
8817  {
8818  return false;
8819  }
8820 
8821  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8822  bool typeConflictFound = false;
8823  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8824  it != m_Suballocations.cend();
8825  ++it)
8826  {
8827  const VmaSuballocationType suballocType = it->type;
8828  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8829  {
8830  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8831  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8832  {
8833  typeConflictFound = true;
8834  }
8835  inOutPrevSuballocType = suballocType;
8836  }
8837  }
8838 
8839  return typeConflictFound || minAlignment >= bufferImageGranularity;
8840 }
8841 
8842 ////////////////////////////////////////////////////////////////////////////////
8843 // class VmaBlockMetadata_Linear
8844 
8845 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8846  VmaBlockMetadata(hAllocator),
8847  m_SumFreeSize(0),
8848  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8849  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8850  m_1stVectorIndex(0),
8851  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8852  m_1stNullItemsBeginCount(0),
8853  m_1stNullItemsMiddleCount(0),
8854  m_2ndNullItemsCount(0)
8855 {
8856 }
8857 
8858 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8859 {
8860 }
8861 
8862 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8863 {
8864  VmaBlockMetadata::Init(size);
8865  m_SumFreeSize = size;
8866 }
8867 
8868 bool VmaBlockMetadata_Linear::Validate() const
8869 {
8870  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8871  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8872 
8873  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8874  VMA_VALIDATE(!suballocations1st.empty() ||
8875  suballocations2nd.empty() ||
8876  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8877 
8878  if(!suballocations1st.empty())
8879  {
8880  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
8881  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8882  // Null item at the end should be just pop_back().
8883  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8884  }
8885  if(!suballocations2nd.empty())
8886  {
8887  // Null item at the end should be just pop_back().
8888  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8889  }
8890 
8891  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8892  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8893 
8894  VkDeviceSize sumUsedSize = 0;
8895  const size_t suballoc1stCount = suballocations1st.size();
8896  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8897 
8898  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8899  {
8900  const size_t suballoc2ndCount = suballocations2nd.size();
8901  size_t nullItem2ndCount = 0;
8902  for(size_t i = 0; i < suballoc2ndCount; ++i)
8903  {
8904  const VmaSuballocation& suballoc = suballocations2nd[i];
8905  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8906 
8907  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8908  VMA_VALIDATE(suballoc.offset >= offset);
8909 
8910  if(!currFree)
8911  {
8912  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8913  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8914  sumUsedSize += suballoc.size;
8915  }
8916  else
8917  {
8918  ++nullItem2ndCount;
8919  }
8920 
8921  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8922  }
8923 
8924  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8925  }
8926 
8927  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8928  {
8929  const VmaSuballocation& suballoc = suballocations1st[i];
8930  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8931  suballoc.hAllocation == VK_NULL_HANDLE);
8932  }
8933 
8934  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8935 
8936  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8937  {
8938  const VmaSuballocation& suballoc = suballocations1st[i];
8939  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8940 
8941  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8942  VMA_VALIDATE(suballoc.offset >= offset);
8943  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8944 
8945  if(!currFree)
8946  {
8947  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8948  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8949  sumUsedSize += suballoc.size;
8950  }
8951  else
8952  {
8953  ++nullItem1stCount;
8954  }
8955 
8956  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8957  }
8958  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8959 
8960  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8961  {
8962  const size_t suballoc2ndCount = suballocations2nd.size();
8963  size_t nullItem2ndCount = 0;
8964  for(size_t i = suballoc2ndCount; i--; )
8965  {
8966  const VmaSuballocation& suballoc = suballocations2nd[i];
8967  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8968 
8969  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8970  VMA_VALIDATE(suballoc.offset >= offset);
8971 
8972  if(!currFree)
8973  {
8974  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8975  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8976  sumUsedSize += suballoc.size;
8977  }
8978  else
8979  {
8980  ++nullItem2ndCount;
8981  }
8982 
8983  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8984  }
8985 
8986  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8987  }
8988 
8989  VMA_VALIDATE(offset <= GetSize());
8990  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8991 
8992  return true;
8993 }
8994 
8995 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8996 {
8997  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8998  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8999 }
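// Worked example with illustrative counts: if the 1st vector holds 10
// entries with 2 null items at the beginning and 1 in the middle, and the
// 2nd vector holds 4 entries with 0 null items, then
//
//   GetAllocationCount() == (10 - (2 + 1)) + (4 - 0) == 11.
//
// Null items are freed slots that have not been compacted away yet.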
9000 
9001 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9002 {
9003  const VkDeviceSize size = GetSize();
9004 
9005  /*
9006  We don't consider gaps inside allocation vectors with freed allocations because
9007  they are not suitable for reuse in a linear allocator. We consider only space that
9008  is available for new allocations.
9009  */
9010  if(IsEmpty())
9011  {
9012  return size;
9013  }
9014 
9015  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9016 
9017  switch(m_2ndVectorMode)
9018  {
9019  case SECOND_VECTOR_EMPTY:
9020  /*
9021  Available space is after end of 1st, as well as before beginning of 1st (which
9022  would make it a ring buffer).
9023  */
9024  {
9025  const size_t suballocations1stCount = suballocations1st.size();
9026  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9027  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9028  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9029  return VMA_MAX(
9030  firstSuballoc.offset,
9031  size - (lastSuballoc.offset + lastSuballoc.size));
9032  }
9033  break;
9034 
9035  case SECOND_VECTOR_RING_BUFFER:
9036  /*
9037  Available space is only between end of 2nd and beginning of 1st.
9038  */
9039  {
9040  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9041  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9042  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9043  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9044  }
9045  break;
9046 
9047  case SECOND_VECTOR_DOUBLE_STACK:
9048  /*
9049  Available space is only between end of 1st and top of 2nd.
9050  */
9051  {
9052  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9053  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9054  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9055  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9056  }
9057  break;
9058 
9059  default:
9060  VMA_ASSERT(0);
9061  return 0;
9062  }
9063 }
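// Memory layout of the three modes handled above (addresses grow to the
// right; illustrative):
//
//   SECOND_VECTOR_EMPTY:        | free | 1st..........       | free |
//   SECOND_VECTOR_RING_BUFFER:  | 2nd..... | free | 1st........... |
//   SECOND_VECTOR_DOUBLE_STACK: | 1st........... | free | .....2nd |
//
// In ring-buffer mode the 2nd vector holds allocations that wrapped
// around to the beginning of the block; in double-stack mode it holds
// upper-address allocations growing down from the end.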
9064 
9065 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9066 {
9067  const VkDeviceSize size = GetSize();
9068  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9069  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9070  const size_t suballoc1stCount = suballocations1st.size();
9071  const size_t suballoc2ndCount = suballocations2nd.size();
9072 
9073  outInfo.blockCount = 1;
9074  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9075  outInfo.unusedRangeCount = 0;
9076  outInfo.usedBytes = 0;
9077  outInfo.allocationSizeMin = UINT64_MAX;
9078  outInfo.allocationSizeMax = 0;
9079  outInfo.unusedRangeSizeMin = UINT64_MAX;
9080  outInfo.unusedRangeSizeMax = 0;
9081 
9082  VkDeviceSize lastOffset = 0;
9083 
9084  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9085  {
9086  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9087  size_t nextAlloc2ndIndex = 0;
9088  while(lastOffset < freeSpace2ndTo1stEnd)
9089  {
9090  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9091  while(nextAlloc2ndIndex < suballoc2ndCount &&
9092  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9093  {
9094  ++nextAlloc2ndIndex;
9095  }
9096 
9097  // Found non-null allocation.
9098  if(nextAlloc2ndIndex < suballoc2ndCount)
9099  {
9100  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9101 
9102  // 1. Process free space before this allocation.
9103  if(lastOffset < suballoc.offset)
9104  {
9105  // There is free space from lastOffset to suballoc.offset.
9106  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9107  ++outInfo.unusedRangeCount;
9108  outInfo.unusedBytes += unusedRangeSize;
9109  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9110  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9111  }
9112 
9113  // 2. Process this allocation.
9114  // There is allocation with suballoc.offset, suballoc.size.
9115  outInfo.usedBytes += suballoc.size;
9116  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9117  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9118 
9119  // 3. Prepare for next iteration.
9120  lastOffset = suballoc.offset + suballoc.size;
9121  ++nextAlloc2ndIndex;
9122  }
9123  // We are at the end.
9124  else
9125  {
9126  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9127  if(lastOffset < freeSpace2ndTo1stEnd)
9128  {
9129  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9130  ++outInfo.unusedRangeCount;
9131  outInfo.unusedBytes += unusedRangeSize;
9132  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9133  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9134  }
9135 
9136  // End of loop.
9137  lastOffset = freeSpace2ndTo1stEnd;
9138  }
9139  }
9140  }
9141 
9142  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9143  const VkDeviceSize freeSpace1stTo2ndEnd =
9144  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9145  while(lastOffset < freeSpace1stTo2ndEnd)
9146  {
9147  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9148  while(nextAlloc1stIndex < suballoc1stCount &&
9149  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9150  {
9151  ++nextAlloc1stIndex;
9152  }
9153 
9154  // Found non-null allocation.
9155  if(nextAlloc1stIndex < suballoc1stCount)
9156  {
9157  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9158 
9159  // 1. Process free space before this allocation.
9160  if(lastOffset < suballoc.offset)
9161  {
9162  // There is free space from lastOffset to suballoc.offset.
9163  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9164  ++outInfo.unusedRangeCount;
9165  outInfo.unusedBytes += unusedRangeSize;
9166  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9167  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9168  }
9169 
9170  // 2. Process this allocation.
9171  // There is allocation with suballoc.offset, suballoc.size.
9172  outInfo.usedBytes += suballoc.size;
9173  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9174  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9175 
9176  // 3. Prepare for next iteration.
9177  lastOffset = suballoc.offset + suballoc.size;
9178  ++nextAlloc1stIndex;
9179  }
9180  // We are at the end.
9181  else
9182  {
9183  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9184  if(lastOffset < freeSpace1stTo2ndEnd)
9185  {
9186  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9187  ++outInfo.unusedRangeCount;
9188  outInfo.unusedBytes += unusedRangeSize;
9189  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9190  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9191  }
9192 
9193  // End of loop.
9194  lastOffset = freeSpace1stTo2ndEnd;
9195  }
9196  }
9197 
9198  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9199  {
9200  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9201  while(lastOffset < size)
9202  {
9203  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9204  while(nextAlloc2ndIndex != SIZE_MAX &&
9205  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9206  {
9207  --nextAlloc2ndIndex;
9208  }
9209 
9210  // Found non-null allocation.
9211  if(nextAlloc2ndIndex != SIZE_MAX)
9212  {
9213  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9214 
9215  // 1. Process free space before this allocation.
9216  if(lastOffset < suballoc.offset)
9217  {
9218  // There is free space from lastOffset to suballoc.offset.
9219  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9220  ++outInfo.unusedRangeCount;
9221  outInfo.unusedBytes += unusedRangeSize;
9222  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9223  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9224  }
9225 
9226  // 2. Process this allocation.
9227  // There is allocation with suballoc.offset, suballoc.size.
9228  outInfo.usedBytes += suballoc.size;
9229  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9230  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9231 
9232  // 3. Prepare for next iteration.
9233  lastOffset = suballoc.offset + suballoc.size;
9234  --nextAlloc2ndIndex;
9235  }
9236  // We are at the end.
9237  else
9238  {
9239  // There is free space from lastOffset to size.
9240  if(lastOffset < size)
9241  {
9242  const VkDeviceSize unusedRangeSize = size - lastOffset;
9243  ++outInfo.unusedRangeCount;
9244  outInfo.unusedBytes += unusedRangeSize;
9245  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9246  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9247  }
9248 
9249  // End of loop.
9250  lastOffset = size;
9251  }
9252  }
9253  }
9254 
9255  outInfo.unusedBytes = size - outInfo.usedBytes;
9256 }
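// Note that the final assignment above recomputes unusedBytes as
// size - usedBytes, which also covers any tail space and supersedes the
// incremental "outInfo.unusedBytes += unusedRangeSize" updates made
// during the walk.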
9257 
9258 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9259 {
9260  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9261  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9262  const VkDeviceSize size = GetSize();
9263  const size_t suballoc1stCount = suballocations1st.size();
9264  const size_t suballoc2ndCount = suballocations2nd.size();
9265 
9266  inoutStats.size += size;
9267 
9268  VkDeviceSize lastOffset = 0;
9269 
9270  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9271  {
9272  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9273  size_t nextAlloc2ndIndex = 0;
9274  while(lastOffset < freeSpace2ndTo1stEnd)
9275  {
9276  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9277  while(nextAlloc2ndIndex < suballoc2ndCount &&
9278  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9279  {
9280  ++nextAlloc2ndIndex;
9281  }
9282 
9283  // Found non-null allocation.
9284  if(nextAlloc2ndIndex < suballoc2ndCount)
9285  {
9286  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9287 
9288  // 1. Process free space before this allocation.
9289  if(lastOffset < suballoc.offset)
9290  {
9291  // There is free space from lastOffset to suballoc.offset.
9292  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9293  inoutStats.unusedSize += unusedRangeSize;
9294  ++inoutStats.unusedRangeCount;
9295  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9296  }
9297 
9298  // 2. Process this allocation.
9299  // There is allocation with suballoc.offset, suballoc.size.
9300  ++inoutStats.allocationCount;
9301 
9302  // 3. Prepare for next iteration.
9303  lastOffset = suballoc.offset + suballoc.size;
9304  ++nextAlloc2ndIndex;
9305  }
9306  // We are at the end.
9307  else
9308  {
9309  if(lastOffset < freeSpace2ndTo1stEnd)
9310  {
9311  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9312  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9313  inoutStats.unusedSize += unusedRangeSize;
9314  ++inoutStats.unusedRangeCount;
9315  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9316  }
9317 
9318  // End of loop.
9319  lastOffset = freeSpace2ndTo1stEnd;
9320  }
9321  }
9322  }
9323 
9324  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9325  const VkDeviceSize freeSpace1stTo2ndEnd =
9326  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9327  while(lastOffset < freeSpace1stTo2ndEnd)
9328  {
9329  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9330  while(nextAlloc1stIndex < suballoc1stCount &&
9331  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9332  {
9333  ++nextAlloc1stIndex;
9334  }
9335 
9336  // Found non-null allocation.
9337  if(nextAlloc1stIndex < suballoc1stCount)
9338  {
9339  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9340 
9341  // 1. Process free space before this allocation.
9342  if(lastOffset < suballoc.offset)
9343  {
9344  // There is free space from lastOffset to suballoc.offset.
9345  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9346  inoutStats.unusedSize += unusedRangeSize;
9347  ++inoutStats.unusedRangeCount;
9348  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9349  }
9350 
9351  // 2. Process this allocation.
9352  // There is allocation with suballoc.offset, suballoc.size.
9353  ++inoutStats.allocationCount;
9354 
9355  // 3. Prepare for next iteration.
9356  lastOffset = suballoc.offset + suballoc.size;
9357  ++nextAlloc1stIndex;
9358  }
9359  // We are at the end.
9360  else
9361  {
9362  if(lastOffset < freeSpace1stTo2ndEnd)
9363  {
9364  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9365  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9366  inoutStats.unusedSize += unusedRangeSize;
9367  ++inoutStats.unusedRangeCount;
9368  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9369  }
9370 
9371  // End of loop.
9372  lastOffset = freeSpace1stTo2ndEnd;
9373  }
9374  }
9375 
9376  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9377  {
9378  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9379  while(lastOffset < size)
9380  {
9381  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9382  while(nextAlloc2ndIndex != SIZE_MAX &&
9383  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9384  {
9385  --nextAlloc2ndIndex;
9386  }
9387 
9388  // Found non-null allocation.
9389  if(nextAlloc2ndIndex != SIZE_MAX)
9390  {
9391  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9392 
9393  // 1. Process free space before this allocation.
9394  if(lastOffset < suballoc.offset)
9395  {
9396  // There is free space from lastOffset to suballoc.offset.
9397  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9398  inoutStats.unusedSize += unusedRangeSize;
9399  ++inoutStats.unusedRangeCount;
9400  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9401  }
9402 
9403  // 2. Process this allocation.
9404  // There is allocation with suballoc.offset, suballoc.size.
9405  ++inoutStats.allocationCount;
9406 
9407  // 3. Prepare for next iteration.
9408  lastOffset = suballoc.offset + suballoc.size;
9409  --nextAlloc2ndIndex;
9410  }
9411  // We are at the end.
9412  else
9413  {
9414  if(lastOffset < size)
9415  {
9416  // There is free space from lastOffset to size.
9417  const VkDeviceSize unusedRangeSize = size - lastOffset;
9418  inoutStats.unusedSize += unusedRangeSize;
9419  ++inoutStats.unusedRangeCount;
9420  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9421  }
9422 
9423  // End of loop.
9424  lastOffset = size;
9425  }
9426  }
9427  }
9428 }
9429 
9430 #if VMA_STATS_STRING_ENABLED
9431 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9432 {
9433  const VkDeviceSize size = GetSize();
9434  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9435  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9436  const size_t suballoc1stCount = suballocations1st.size();
9437  const size_t suballoc2ndCount = suballocations2nd.size();
9438 
9439  // FIRST PASS
9440 
9441  size_t unusedRangeCount = 0;
9442  VkDeviceSize usedBytes = 0;
9443 
9444  VkDeviceSize lastOffset = 0;
9445 
9446  size_t alloc2ndCount = 0;
9447  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9448  {
9449  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9450  size_t nextAlloc2ndIndex = 0;
9451  while(lastOffset < freeSpace2ndTo1stEnd)
9452  {
9453  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9454  while(nextAlloc2ndIndex < suballoc2ndCount &&
9455  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9456  {
9457  ++nextAlloc2ndIndex;
9458  }
9459 
9460  // Found non-null allocation.
9461  if(nextAlloc2ndIndex < suballoc2ndCount)
9462  {
9463  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9464 
9465  // 1. Process free space before this allocation.
9466  if(lastOffset < suballoc.offset)
9467  {
9468  // There is free space from lastOffset to suballoc.offset.
9469  ++unusedRangeCount;
9470  }
9471 
9472  // 2. Process this allocation.
9473  // There is allocation with suballoc.offset, suballoc.size.
9474  ++alloc2ndCount;
9475  usedBytes += suballoc.size;
9476 
9477  // 3. Prepare for next iteration.
9478  lastOffset = suballoc.offset + suballoc.size;
9479  ++nextAlloc2ndIndex;
9480  }
9481  // We are at the end.
9482  else
9483  {
9484  if(lastOffset < freeSpace2ndTo1stEnd)
9485  {
9486  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9487  ++unusedRangeCount;
9488  }
9489 
9490  // End of loop.
9491  lastOffset = freeSpace2ndTo1stEnd;
9492  }
9493  }
9494  }
9495 
9496  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9497  size_t alloc1stCount = 0;
9498  const VkDeviceSize freeSpace1stTo2ndEnd =
9499  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9500  while(lastOffset < freeSpace1stTo2ndEnd)
9501  {
9502  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9503  while(nextAlloc1stIndex < suballoc1stCount &&
9504  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9505  {
9506  ++nextAlloc1stIndex;
9507  }
9508 
9509  // Found non-null allocation.
9510  if(nextAlloc1stIndex < suballoc1stCount)
9511  {
9512  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9513 
9514  // 1. Process free space before this allocation.
9515  if(lastOffset < suballoc.offset)
9516  {
9517  // There is free space from lastOffset to suballoc.offset.
9518  ++unusedRangeCount;
9519  }
9520 
9521  // 2. Process this allocation.
9522  // There is allocation with suballoc.offset, suballoc.size.
9523  ++alloc1stCount;
9524  usedBytes += suballoc.size;
9525 
9526  // 3. Prepare for next iteration.
9527  lastOffset = suballoc.offset + suballoc.size;
9528  ++nextAlloc1stIndex;
9529  }
9530  // We are at the end.
9531  else
9532  {
9533  if(lastOffset < freeSpace1stTo2ndEnd)
9534  {
9535  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9536  ++unusedRangeCount;
9537  }
9538 
9539  // End of loop.
9540  lastOffset = freeSpace1stTo2ndEnd;
9541  }
9542  }
9543 
9544  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9545  {
9546  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9547  while(lastOffset < size)
9548  {
9549  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9550  while(nextAlloc2ndIndex != SIZE_MAX &&
9551  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9552  {
9553  --nextAlloc2ndIndex;
9554  }
9555 
9556  // Found non-null allocation.
9557  if(nextAlloc2ndIndex != SIZE_MAX)
9558  {
9559  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9560 
9561  // 1. Process free space before this allocation.
9562  if(lastOffset < suballoc.offset)
9563  {
9564  // There is free space from lastOffset to suballoc.offset.
9565  ++unusedRangeCount;
9566  }
9567 
9568  // 2. Process this allocation.
9569  // There is allocation with suballoc.offset, suballoc.size.
9570  ++alloc2ndCount;
9571  usedBytes += suballoc.size;
9572 
9573  // 3. Prepare for next iteration.
9574  lastOffset = suballoc.offset + suballoc.size;
9575  --nextAlloc2ndIndex;
9576  }
9577  // We are at the end.
9578  else
9579  {
9580  if(lastOffset < size)
9581  {
9582  // There is free space from lastOffset to size.
9583  ++unusedRangeCount;
9584  }
9585 
9586  // End of loop.
9587  lastOffset = size;
9588  }
9589  }
9590  }
9591 
9592  const VkDeviceSize unusedBytes = size - usedBytes;
9593  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9594 
9595  // SECOND PASS
9596  lastOffset = 0;
9597 
9598  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9599  {
9600  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9601  size_t nextAlloc2ndIndex = 0;
9602  while(lastOffset < freeSpace2ndTo1stEnd)
9603  {
9604  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9605  while(nextAlloc2ndIndex < suballoc2ndCount &&
9606  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9607  {
9608  ++nextAlloc2ndIndex;
9609  }
9610 
9611  // Found non-null allocation.
9612  if(nextAlloc2ndIndex < suballoc2ndCount)
9613  {
9614  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9615 
9616  // 1. Process free space before this allocation.
9617  if(lastOffset < suballoc.offset)
9618  {
9619  // There is free space from lastOffset to suballoc.offset.
9620  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9621  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9622  }
9623 
9624  // 2. Process this allocation.
9625  // There is allocation with suballoc.offset, suballoc.size.
9626  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9627 
9628  // 3. Prepare for next iteration.
9629  lastOffset = suballoc.offset + suballoc.size;
9630  ++nextAlloc2ndIndex;
9631  }
9632  // We are at the end.
9633  else
9634  {
9635  if(lastOffset < freeSpace2ndTo1stEnd)
9636  {
9637  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9638  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9640  }
9641 
9642  // End of loop.
9643  lastOffset = freeSpace2ndTo1stEnd;
9644  }
9645  }
9646  }
9647 
9648  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9649  while(lastOffset < freeSpace1stTo2ndEnd)
9650  {
9651  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9652  while(nextAlloc1stIndex < suballoc1stCount &&
9653  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9654  {
9655  ++nextAlloc1stIndex;
9656  }
9657 
9658  // Found non-null allocation.
9659  if(nextAlloc1stIndex < suballoc1stCount)
9660  {
9661  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9662 
9663  // 1. Process free space before this allocation.
9664  if(lastOffset < suballoc.offset)
9665  {
9666  // There is free space from lastOffset to suballoc.offset.
9667  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9668  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9669  }
9670 
9671  // 2. Process this allocation.
9672  // There is allocation with suballoc.offset, suballoc.size.
9673  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9674 
9675  // 3. Prepare for next iteration.
9676  lastOffset = suballoc.offset + suballoc.size;
9677  ++nextAlloc1stIndex;
9678  }
9679  // We are at the end.
9680  else
9681  {
9682  if(lastOffset < freeSpace1stTo2ndEnd)
9683  {
9684  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9685  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9686  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9687  }
9688 
9689  // End of loop.
9690  lastOffset = freeSpace1stTo2ndEnd;
9691  }
9692  }
9693 
9694  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9695  {
9696  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9697  while(lastOffset < size)
9698  {
9699  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9700  while(nextAlloc2ndIndex != SIZE_MAX &&
9701  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9702  {
9703  --nextAlloc2ndIndex;
9704  }
9705 
9706  // Found non-null allocation.
9707  if(nextAlloc2ndIndex != SIZE_MAX)
9708  {
9709  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9710 
9711  // 1. Process free space before this allocation.
9712  if(lastOffset < suballoc.offset)
9713  {
9714  // There is free space from lastOffset to suballoc.offset.
9715  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9716  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9717  }
9718 
9719  // 2. Process this allocation.
9720  // There is allocation with suballoc.offset, suballoc.size.
9721  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9722 
9723  // 3. Prepare for next iteration.
9724  lastOffset = suballoc.offset + suballoc.size;
9725  --nextAlloc2ndIndex;
9726  }
9727  // We are at the end.
9728  else
9729  {
9730  if(lastOffset < size)
9731  {
9732  // There is free space from lastOffset to size.
9733  const VkDeviceSize unusedRangeSize = size - lastOffset;
9734  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9735  }
9736 
9737  // End of loop.
9738  lastOffset = size;
9739  }
9740  }
9741  }
9742 
9743  PrintDetailedMap_End(json);
9744 }
9745 #endif // #if VMA_STATS_STRING_ENABLED
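// PrintDetailedMap() walks the same three regions twice on purpose: the
// first pass only counts allocations and unused ranges so that
// PrintDetailedMap_Begin() can emit the totals up front, and the second
// pass emits one entry per allocation or unused range in address order.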
9746 
9747 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9748  uint32_t currentFrameIndex,
9749  uint32_t frameInUseCount,
9750  VkDeviceSize bufferImageGranularity,
9751  VkDeviceSize allocSize,
9752  VkDeviceSize allocAlignment,
9753  bool upperAddress,
9754  VmaSuballocationType allocType,
9755  bool canMakeOtherLost,
9756  uint32_t strategy,
9757  VmaAllocationRequest* pAllocationRequest)
9758 {
9759  VMA_ASSERT(allocSize > 0);
9760  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9761  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9762  VMA_HEAVY_ASSERT(Validate());
9763  return upperAddress ?
9764  CreateAllocationRequest_UpperAddress(
9765  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9766  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9767  CreateAllocationRequest_LowerAddress(
9768  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9769  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9770 }
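// The upperAddress path corresponds to the public flag
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, valid only for custom pools
// created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. Hypothetical usage
// sketch (assumes 'allocator', 'linearPool' and 'bufCreateInfo' already
// exist):
//
// \code
// VmaAllocationCreateInfo allocCreateInfo = {};
// allocCreateInfo.pool = linearPool;
// allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
//
// VkBuffer buf;
// VmaAllocation alloc;
// vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//     &buf, &alloc, VMA_NULL);
// \endcode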
9771 
9772 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9773  uint32_t currentFrameIndex,
9774  uint32_t frameInUseCount,
9775  VkDeviceSize bufferImageGranularity,
9776  VkDeviceSize allocSize,
9777  VkDeviceSize allocAlignment,
9778  VmaSuballocationType allocType,
9779  bool canMakeOtherLost,
9780  uint32_t strategy,
9781  VmaAllocationRequest* pAllocationRequest)
9782 {
9783  const VkDeviceSize size = GetSize();
9784  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9785  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9786 
9787  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9788  {
9789  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9790  return false;
9791  }
9792 
9793  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9794  if(allocSize > size)
9795  {
9796  return false;
9797  }
9798  VkDeviceSize resultBaseOffset = size - allocSize;
9799  if(!suballocations2nd.empty())
9800  {
9801  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9802  resultBaseOffset = lastSuballoc.offset - allocSize;
9803  if(allocSize > lastSuballoc.offset)
9804  {
9805  return false;
9806  }
9807  }
9808 
9809  // Start from offset equal to end of free space.
9810  VkDeviceSize resultOffset = resultBaseOffset;
9811 
9812  // Apply VMA_DEBUG_MARGIN at the end.
9813  if(VMA_DEBUG_MARGIN > 0)
9814  {
9815  if(resultOffset < VMA_DEBUG_MARGIN)
9816  {
9817  return false;
9818  }
9819  resultOffset -= VMA_DEBUG_MARGIN;
9820  }
9821 
9822  // Apply alignment.
9823  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9824 
9825  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9826  // Make bigger alignment if necessary.
9827  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9828  {
9829  bool bufferImageGranularityConflict = false;
9830  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9831  {
9832  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9833  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9834  {
9835  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9836  {
9837  bufferImageGranularityConflict = true;
9838  break;
9839  }
9840  }
9841  else
9842  // Already on previous page.
9843  break;
9844  }
9845  if(bufferImageGranularityConflict)
9846  {
9847  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9848  }
9849  }
9850 
9851  // There is enough free space.
9852  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9853  suballocations1st.back().offset + suballocations1st.back().size :
9854  0;
9855  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9856  {
9857  // Check previous suballocations for BufferImageGranularity conflicts.
9858  // If conflict exists, allocation cannot be made here.
9859  if(bufferImageGranularity > 1)
9860  {
9861  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9862  {
9863  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9864  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9865  {
9866  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9867  {
9868  return false;
9869  }
9870  }
9871  else
9872  {
9873  // Already on next page.
9874  break;
9875  }
9876  }
9877  }
9878 
9879  // All tests passed: Success.
9880  pAllocationRequest->offset = resultOffset;
9881  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9882  pAllocationRequest->sumItemSize = 0;
9883  // pAllocationRequest->item unused.
9884  pAllocationRequest->itemsToMakeLostCount = 0;
9885  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9886  return true;
9887  }
9888 
9889  return false;
9890 }
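// Worked example of the top-down computation above, with illustrative
// numbers: block size 1000, allocSize 100, allocAlignment 64, an empty
// 2nd vector, and VMA_DEBUG_MARGIN == 0:
//
//   resultBaseOffset = 1000 - 100 = 900
//   resultOffset     = VmaAlignDown(900, 64) = 896
//
// The request succeeds if the end of the 1st vector's last allocation is
// at or below offset 896; sumFreeSize then reports the whole gap between
// the end of 1st and the bottom of the 2nd stack (here, the end of the
// block).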
9891 
9892 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9893  uint32_t currentFrameIndex,
9894  uint32_t frameInUseCount,
9895  VkDeviceSize bufferImageGranularity,
9896  VkDeviceSize allocSize,
9897  VkDeviceSize allocAlignment,
9898  VmaSuballocationType allocType,
9899  bool canMakeOtherLost,
9900  uint32_t strategy,
9901  VmaAllocationRequest* pAllocationRequest)
9902 {
9903  const VkDeviceSize size = GetSize();
9904  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9905  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9906 
9907  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9908  {
9909  // Try to allocate at the end of 1st vector.
9910 
9911  VkDeviceSize resultBaseOffset = 0;
9912  if(!suballocations1st.empty())
9913  {
9914  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9915  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9916  }
9917 
9918  // Start from offset equal to beginning of free space.
9919  VkDeviceSize resultOffset = resultBaseOffset;
9920 
9921  // Apply VMA_DEBUG_MARGIN at the beginning.
9922  if(VMA_DEBUG_MARGIN > 0)
9923  {
9924  resultOffset += VMA_DEBUG_MARGIN;
9925  }
9926 
9927  // Apply alignment.
9928  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9929 
9930  // Check previous suballocations for BufferImageGranularity conflicts.
9931  // Make bigger alignment if necessary.
9932  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9933  {
9934  bool bufferImageGranularityConflict = false;
9935  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9936  {
9937  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9938  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9939  {
9940  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9941  {
9942  bufferImageGranularityConflict = true;
9943  break;
9944  }
9945  }
9946  else
9947  // Already on previous page.
9948  break;
9949  }
9950  if(bufferImageGranularityConflict)
9951  {
9952  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9953  }
9954  }
9955 
9956  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9957  suballocations2nd.back().offset : size;
9958 
9959  // There is enough free space at the end after alignment.
9960  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9961  {
9962  // Check next suballocations for BufferImageGranularity conflicts.
9963  // If conflict exists, allocation cannot be made here.
9964  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9965  {
9966  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9967  {
9968  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9969  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9970  {
9971  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9972  {
9973  return false;
9974  }
9975  }
9976  else
9977  {
9978  // Already on previous page.
9979  break;
9980  }
9981  }
9982  }
9983 
9984  // All tests passed: Success.
9985  pAllocationRequest->offset = resultOffset;
9986  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9987  pAllocationRequest->sumItemSize = 0;
9988  // pAllocationRequest->item, customData unused.
9989  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9990  pAllocationRequest->itemsToMakeLostCount = 0;
9991  return true;
9992  }
9993  }
9994 
9995  // Wrap around to the end of the 2nd vector. Try to allocate there, treating the
9996  // beginning of the 1st vector as the end of free space.
9997  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9998  {
9999  VMA_ASSERT(!suballocations1st.empty());
10000 
10001  VkDeviceSize resultBaseOffset = 0;
10002  if(!suballocations2nd.empty())
10003  {
10004  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10005  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10006  }
10007 
10008  // Start from offset equal to beginning of free space.
10009  VkDeviceSize resultOffset = resultBaseOffset;
10010 
10011  // Apply VMA_DEBUG_MARGIN at the beginning.
10012  if(VMA_DEBUG_MARGIN > 0)
10013  {
10014  resultOffset += VMA_DEBUG_MARGIN;
10015  }
10016 
10017  // Apply alignment.
10018  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10019 
10020  // Check previous suballocations for BufferImageGranularity conflicts.
10021  // Increase alignment if necessary.
10022  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10023  {
10024  bool bufferImageGranularityConflict = false;
10025  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10026  {
10027  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10028  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10029  {
10030  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10031  {
10032  bufferImageGranularityConflict = true;
10033  break;
10034  }
10035  }
10036  else
10037  // Already on previous page.
10038  break;
10039  }
10040  if(bufferImageGranularityConflict)
10041  {
10042  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10043  }
10044  }
10045 
10046  pAllocationRequest->itemsToMakeLostCount = 0;
10047  pAllocationRequest->sumItemSize = 0;
10048  size_t index1st = m_1stNullItemsBeginCount;
10049 
10050  if(canMakeOtherLost)
10051  {
10052  while(index1st < suballocations1st.size() &&
10053  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10054  {
10055  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10056  const VmaSuballocation& suballoc = suballocations1st[index1st];
10057  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10058  {
10059  // No problem.
10060  }
10061  else
10062  {
10063  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10064  if(suballoc.hAllocation->CanBecomeLost() &&
10065  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10066  {
10067  ++pAllocationRequest->itemsToMakeLostCount;
10068  pAllocationRequest->sumItemSize += suballoc.size;
10069  }
10070  else
10071  {
10072  return false;
10073  }
10074  }
10075  ++index1st;
10076  }
10077 
10078  // Check next suballocations for BufferImageGranularity conflicts.
10079  // If conflict exists, we must mark more allocations lost or fail.
10080  if(bufferImageGranularity > 1)
10081  {
10082  while(index1st < suballocations1st.size())
10083  {
10084  const VmaSuballocation& suballoc = suballocations1st[index1st];
10085  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10086  {
10087  if(suballoc.hAllocation != VK_NULL_HANDLE)
10088  {
10089  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10090  if(suballoc.hAllocation->CanBecomeLost() &&
10091  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10092  {
10093  ++pAllocationRequest->itemsToMakeLostCount;
10094  pAllocationRequest->sumItemSize += suballoc.size;
10095  }
10096  else
10097  {
10098  return false;
10099  }
10100  }
10101  }
10102  else
10103  {
10104  // Already on next page.
10105  break;
10106  }
10107  ++index1st;
10108  }
10109  }
10110 
10111  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10112  if(index1st == suballocations1st.size() &&
10113  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10114  {
10115  // TODO: Known limitation: this case is not implemented yet, so the allocation fails here.
10116  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10117  }
10118  }
10119 
10120  // There is enough free space at the end after alignment.
10121  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10122  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10123  {
10124  // Check next suballocations for BufferImageGranularity conflicts.
10125  // If conflict exists, allocation cannot be made here.
10126  if(bufferImageGranularity > 1)
10127  {
10128  for(size_t nextSuballocIndex = index1st;
10129  nextSuballocIndex < suballocations1st.size();
10130  nextSuballocIndex++)
10131  {
10132  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10133  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10134  {
10135  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10136  {
10137  return false;
10138  }
10139  }
10140  else
10141  {
10142  // Already on next page.
10143  break;
10144  }
10145  }
10146  }
10147 
10148  // All tests passed: Success.
10149  pAllocationRequest->offset = resultOffset;
10150  pAllocationRequest->sumFreeSize =
10151  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10152  - resultBaseOffset
10153  - pAllocationRequest->sumItemSize;
10154  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10155  // pAllocationRequest->item, customData unused.
10156  return true;
10157  }
10158  }
10159 
10160  return false;
10161 }
10162 
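/*
Non-normative layout sketch of the three second-vector states handled above
(offsets grow to the right, '.' marks free space):

  SECOND_VECTOR_EMPTY:        |[A][B][C]...................|   new requests -> EndOf1st
  SECOND_VECTOR_DOUBLE_STACK: |[A][B][C].........[Y][X]|       free space ends at
                              suballocations2nd.back().offset (upper stack grows down)
  SECOND_VECTOR_RING_BUFFER:  |[D][E].....[A][B][C]........|   2nd wraps behind 1st;
                              EndOf2nd must end before suballocations1st[index1st].offset
*/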
10163 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10164  uint32_t currentFrameIndex,
10165  uint32_t frameInUseCount,
10166  VmaAllocationRequest* pAllocationRequest)
10167 {
10168  if(pAllocationRequest->itemsToMakeLostCount == 0)
10169  {
10170  return true;
10171  }
10172 
10173  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10174 
10175  // We always start from 1st.
10176  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10177  size_t index = m_1stNullItemsBeginCount;
10178  size_t madeLostCount = 0;
10179  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10180  {
10181  if(index == suballocations->size())
10182  {
10183  index = 0;
10184  // If we get to the end of 1st, we wrap around to the beginning of 2nd or 1st.
10185  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10186  {
10187  suballocations = &AccessSuballocations2nd();
10188  }
10189  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10190  // suballocations continues pointing at AccessSuballocations1st().
10191  VMA_ASSERT(!suballocations->empty());
10192  }
10193  VmaSuballocation& suballoc = (*suballocations)[index];
10194  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10195  {
10196  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10197  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10198  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10199  {
10200  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10201  suballoc.hAllocation = VK_NULL_HANDLE;
10202  m_SumFreeSize += suballoc.size;
10203  if(suballocations == &AccessSuballocations1st())
10204  {
10205  ++m_1stNullItemsMiddleCount;
10206  }
10207  else
10208  {
10209  ++m_2ndNullItemsCount;
10210  }
10211  ++madeLostCount;
10212  }
10213  else
10214  {
10215  return false;
10216  }
10217  }
10218  ++index;
10219  }
10220 
10221  CleanupAfterFree();
10222  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10223 
10224  return true;
10225 }
10226 
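// Lostness example (illustrative numbers): with frameInUseCount = 2 and
// currentFrameIndex = 120, an allocation whose GetLastUseFrameIndex() returns 117
// satisfies 117 + 2 < 120 and can be made lost; one last used at frame 118 may
// still be in use by the GPU, so MakeLost() refuses and the function returns false.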
10227 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10228 {
10229  uint32_t lostAllocationCount = 0;
10230 
10231  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10232  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10233  {
10234  VmaSuballocation& suballoc = suballocations1st[i];
10235  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10236  suballoc.hAllocation->CanBecomeLost() &&
10237  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10238  {
10239  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10240  suballoc.hAllocation = VK_NULL_HANDLE;
10241  ++m_1stNullItemsMiddleCount;
10242  m_SumFreeSize += suballoc.size;
10243  ++lostAllocationCount;
10244  }
10245  }
10246 
10247  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10248  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10249  {
10250  VmaSuballocation& suballoc = suballocations2nd[i];
10251  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10252  suballoc.hAllocation->CanBecomeLost() &&
10253  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10254  {
10255  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10256  suballoc.hAllocation = VK_NULL_HANDLE;
10257  ++m_2ndNullItemsCount;
10258  m_SumFreeSize += suballoc.size;
10259  ++lostAllocationCount;
10260  }
10261  }
10262 
10263  if(lostAllocationCount)
10264  {
10265  CleanupAfterFree();
10266  }
10267 
10268  return lostAllocationCount;
10269 }
10270 
10271 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10272 {
10273  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10274  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10275  {
10276  const VmaSuballocation& suballoc = suballocations1st[i];
10277  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10278  {
10279  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10280  {
10281  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10282  return VK_ERROR_VALIDATION_FAILED_EXT;
10283  }
10284  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10285  {
10286  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10287  return VK_ERROR_VALIDATION_FAILED_EXT;
10288  }
10289  }
10290  }
10291 
10292  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10293  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10294  {
10295  const VmaSuballocation& suballoc = suballocations2nd[i];
10296  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10297  {
10298  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10299  {
10300  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10301  return VK_ERROR_VALIDATION_FAILED_EXT;
10302  }
10303  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10304  {
10305  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10306  return VK_ERROR_VALIDATION_FAILED_EXT;
10307  }
10308  }
10309  }
10310 
10311  return VK_SUCCESS;
10312 }
10313 
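/*
Memory layout validated by CheckCorruption() above, a non-normative sketch
(active only when VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION is on):

    [ magic values ][ user data of the allocation ][ magic values ]
    ^               ^                              ^
    offset - VMA_DEBUG_MARGIN                      offset + size
                    offset

An overwritten magic value on either side means the application wrote outside
the bounds of its allocation.
*/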
10314 void VmaBlockMetadata_Linear::Alloc(
10315  const VmaAllocationRequest& request,
10316  VmaSuballocationType type,
10317  VkDeviceSize allocSize,
10318  VmaAllocation hAllocation)
10319 {
10320  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10321 
10322  switch(request.type)
10323  {
10324  case VmaAllocationRequestType::UpperAddress:
10325  {
10326  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10327  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10328  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10329  suballocations2nd.push_back(newSuballoc);
10330  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10331  }
10332  break;
10333  case VmaAllocationRequestType::EndOf1st:
10334  {
10335  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10336 
10337  VMA_ASSERT(suballocations1st.empty() ||
10338  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10339  // Check if it fits before the end of the block.
10340  VMA_ASSERT(request.offset + allocSize <= GetSize());
10341 
10342  suballocations1st.push_back(newSuballoc);
10343  }
10344  break;
10345  case VmaAllocationRequestType::EndOf2nd:
10346  {
10347  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10348  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10349  VMA_ASSERT(!suballocations1st.empty() &&
10350  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10351  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10352 
10353  switch(m_2ndVectorMode)
10354  {
10355  case SECOND_VECTOR_EMPTY:
10356  // First allocation in the second part of the ring buffer.
10357  VMA_ASSERT(suballocations2nd.empty());
10358  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10359  break;
10360  case SECOND_VECTOR_RING_BUFFER:
10361  // 2-part ring buffer is already started.
10362  VMA_ASSERT(!suballocations2nd.empty());
10363  break;
10364  case SECOND_VECTOR_DOUBLE_STACK:
10365  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10366  break;
10367  default:
10368  VMA_ASSERT(0);
10369  }
10370 
10371  suballocations2nd.push_back(newSuballoc);
10372  }
10373  break;
10374  default:
10375  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10376  }
10377 
10378  m_SumFreeSize -= newSuballoc.size;
10379 }
10380 
10381 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10382 {
10383  FreeAtOffset(allocation->GetOffset());
10384 }
10385 
10386 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10387 {
10388  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10389  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10390 
10391  if(!suballocations1st.empty())
10392  {
10393  // Freeing the first allocation in 1st vector: mark it as the next null item at the beginning.
10394  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10395  if(firstSuballoc.offset == offset)
10396  {
10397  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10398  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10399  m_SumFreeSize += firstSuballoc.size;
10400  ++m_1stNullItemsBeginCount;
10401  CleanupAfterFree();
10402  return;
10403  }
10404  }
10405 
10406  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10407  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10408  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10409  {
10410  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10411  if(lastSuballoc.offset == offset)
10412  {
10413  m_SumFreeSize += lastSuballoc.size;
10414  suballocations2nd.pop_back();
10415  CleanupAfterFree();
10416  return;
10417  }
10418  }
10419  // Last allocation in 1st vector.
10420  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10421  {
10422  VmaSuballocation& lastSuballoc = suballocations1st.back();
10423  if(lastSuballoc.offset == offset)
10424  {
10425  m_SumFreeSize += lastSuballoc.size;
10426  suballocations1st.pop_back();
10427  CleanupAfterFree();
10428  return;
10429  }
10430  }
10431 
10432  // Item from the middle of 1st vector.
10433  {
10434  VmaSuballocation refSuballoc;
10435  refSuballoc.offset = offset;
10436  // Rest of members stays uninitialized intentionally for better performance.
10437  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
10438  suballocations1st.begin() + m_1stNullItemsBeginCount,
10439  suballocations1st.end(),
10440  refSuballoc,
10441  VmaSuballocationOffsetLess());
10442  if(it != suballocations1st.end())
10443  {
10444  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10445  it->hAllocation = VK_NULL_HANDLE;
10446  ++m_1stNullItemsMiddleCount;
10447  m_SumFreeSize += it->size;
10448  CleanupAfterFree();
10449  return;
10450  }
10451  }
10452 
10453  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10454  {
10455  // Item from the middle of 2nd vector.
10456  VmaSuballocation refSuballoc;
10457  refSuballoc.offset = offset;
10458  // Rest of members stays uninitialized intentionally for better performance.
10459  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10460  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
10461  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
10462  if(it != suballocations2nd.end())
10463  {
10464  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10465  it->hAllocation = VK_NULL_HANDLE;
10466  ++m_2ndNullItemsCount;
10467  m_SumFreeSize += it->size;
10468  CleanupAfterFree();
10469  return;
10470  }
10471  }
10472 
10473  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10474 }
10475 
10476 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10477 {
10478  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10479  const size_t suballocCount = AccessSuballocations1st().size();
10480  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10481 }
10482 
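// Heuristic example: with 100 suballocations in 1st of which 60 are null items,
// 60 * 2 = 120 >= (100 - 60) * 3 = 120, so compaction triggers. In other words the
// vector is compacted once null items reach 1.5x the live ones, and never while
// it holds 32 items or fewer.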
10483 void VmaBlockMetadata_Linear::CleanupAfterFree()
10484 {
10485  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10486  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10487 
10488  if(IsEmpty())
10489  {
10490  suballocations1st.clear();
10491  suballocations2nd.clear();
10492  m_1stNullItemsBeginCount = 0;
10493  m_1stNullItemsMiddleCount = 0;
10494  m_2ndNullItemsCount = 0;
10495  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10496  }
10497  else
10498  {
10499  const size_t suballoc1stCount = suballocations1st.size();
10500  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10501  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10502 
10503  // Find more null items at the beginning of 1st vector.
10504  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10505  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10506  {
10507  ++m_1stNullItemsBeginCount;
10508  --m_1stNullItemsMiddleCount;
10509  }
10510 
10511  // Find more null items at the end of 1st vector.
10512  while(m_1stNullItemsMiddleCount > 0 &&
10513  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10514  {
10515  --m_1stNullItemsMiddleCount;
10516  suballocations1st.pop_back();
10517  }
10518 
10519  // Find more null items at the end of 2nd vector.
10520  while(m_2ndNullItemsCount > 0 &&
10521  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10522  {
10523  --m_2ndNullItemsCount;
10524  suballocations2nd.pop_back();
10525  }
10526 
10527  // Find more null items at the beginning of 2nd vector.
10528  while(m_2ndNullItemsCount > 0 &&
10529  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10530  {
10531  --m_2ndNullItemsCount;
10532  VmaVectorRemove(suballocations2nd, 0);
10533  }
10534 
10535  if(ShouldCompact1st())
10536  {
10537  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10538  size_t srcIndex = m_1stNullItemsBeginCount;
10539  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10540  {
10541  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10542  {
10543  ++srcIndex;
10544  }
10545  if(dstIndex != srcIndex)
10546  {
10547  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10548  }
10549  ++srcIndex;
10550  }
10551  suballocations1st.resize(nonNullItemCount);
10552  m_1stNullItemsBeginCount = 0;
10553  m_1stNullItemsMiddleCount = 0;
10554  }
10555 
10556  // 2nd vector became empty.
10557  if(suballocations2nd.empty())
10558  {
10559  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10560  }
10561 
10562  // 1st vector became empty.
10563  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10564  {
10565  suballocations1st.clear();
10566  m_1stNullItemsBeginCount = 0;
10567 
10568  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10569  {
10570  // Swap 1st with 2nd. Now 2nd is empty.
10571  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10572  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10573  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10574  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10575  {
10576  ++m_1stNullItemsBeginCount;
10577  --m_1stNullItemsMiddleCount;
10578  }
10579  m_2ndNullItemsCount = 0;
10580  m_1stVectorIndex ^= 1;
10581  }
10582  }
10583  }
10584 
10585  VMA_HEAVY_ASSERT(Validate());
10586 }
10587 
10588 
10589 ////////////////////////////////////////////////////////////////////////////////
10590 // class VmaBlockMetadata_Buddy
10591 
10592 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10593  VmaBlockMetadata(hAllocator),
10594  m_Root(VMA_NULL),
10595  m_AllocationCount(0),
10596  m_FreeCount(1),
10597  m_SumFreeSize(0)
10598 {
10599  memset(m_FreeList, 0, sizeof(m_FreeList));
10600 }
10601 
10602 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10603 {
10604  DeleteNode(m_Root);
10605 }
10606 
10607 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10608 {
10609  VmaBlockMetadata::Init(size);
10610 
10611  m_UsableSize = VmaPrevPow2(size);
10612  m_SumFreeSize = m_UsableSize;
10613 
10614  // Calculate m_LevelCount.
10615  m_LevelCount = 1;
10616  while(m_LevelCount < MAX_LEVELS &&
10617  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10618  {
10619  ++m_LevelCount;
10620  }
10621 
10622  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10623  rootNode->offset = 0;
10624  rootNode->type = Node::TYPE_FREE;
10625  rootNode->parent = VMA_NULL;
10626  rootNode->buddy = VMA_NULL;
10627 
10628  m_Root = rootNode;
10629  AddToFreeListFront(0, rootNode);
10630 }
10631 
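// Example: Init(size = 100 MiB) yields m_UsableSize = 64 MiB (the previous power
// of 2); the remaining 36 MiB stay unusable by this algorithm and are reported
// via GetUnusableSize(). m_LevelCount then spans node sizes 64 MiB, 32 MiB, ...,
// halving per level until MIN_NODE_SIZE or MAX_LEVELS stops the loop.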
10632 bool VmaBlockMetadata_Buddy::Validate() const
10633 {
10634  // Validate tree.
10635  ValidationContext ctx;
10636  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10637  {
10638  VMA_VALIDATE(false && "ValidateNode failed.");
10639  }
10640  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10641  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10642 
10643  // Validate free node lists.
10644  for(uint32_t level = 0; level < m_LevelCount; ++level)
10645  {
10646  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10647  m_FreeList[level].front->free.prev == VMA_NULL);
10648 
10649  for(Node* node = m_FreeList[level].front;
10650  node != VMA_NULL;
10651  node = node->free.next)
10652  {
10653  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10654 
10655  if(node->free.next == VMA_NULL)
10656  {
10657  VMA_VALIDATE(m_FreeList[level].back == node);
10658  }
10659  else
10660  {
10661  VMA_VALIDATE(node->free.next->free.prev == node);
10662  }
10663  }
10664  }
10665 
10666  // Validate that free lists at higher levels are empty.
10667  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10668  {
10669  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10670  }
10671 
10672  return true;
10673 }
10674 
10675 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10676 {
10677  for(uint32_t level = 0; level < m_LevelCount; ++level)
10678  {
10679  if(m_FreeList[level].front != VMA_NULL)
10680  {
10681  return LevelToNodeSize(level);
10682  }
10683  }
10684  return 0;
10685 }
10686 
10687 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10688 {
10689  const VkDeviceSize unusableSize = GetUnusableSize();
10690 
10691  outInfo.blockCount = 1;
10692 
10693  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10694  outInfo.usedBytes = outInfo.unusedBytes = 0;
10695 
10696  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10697  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10698  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10699 
10700  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10701 
10702  if(unusableSize > 0)
10703  {
10704  ++outInfo.unusedRangeCount;
10705  outInfo.unusedBytes += unusableSize;
10706  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10707  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10708  }
10709 }
10710 
10711 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10712 {
10713  const VkDeviceSize unusableSize = GetUnusableSize();
10714 
10715  inoutStats.size += GetSize();
10716  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10717  inoutStats.allocationCount += m_AllocationCount;
10718  inoutStats.unusedRangeCount += m_FreeCount;
10719  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10720 
10721  if(unusableSize > 0)
10722  {
10723  ++inoutStats.unusedRangeCount;
10724  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10725  }
10726 }
10727 
10728 #if VMA_STATS_STRING_ENABLED
10729 
10730 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10731 {
10732  // TODO optimize
10733  VmaStatInfo stat;
10734  CalcAllocationStatInfo(stat);
10735 
10736  PrintDetailedMap_Begin(
10737  json,
10738  stat.unusedBytes,
10739  stat.allocationCount,
10740  stat.unusedRangeCount);
10741 
10742  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10743 
10744  const VkDeviceSize unusableSize = GetUnusableSize();
10745  if(unusableSize > 0)
10746  {
10747  PrintDetailedMap_UnusedRange(json,
10748  m_UsableSize, // offset
10749  unusableSize); // size
10750  }
10751 
10752  PrintDetailedMap_End(json);
10753 }
10754 
10755 #endif // #if VMA_STATS_STRING_ENABLED
10756 
10757 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10758  uint32_t currentFrameIndex,
10759  uint32_t frameInUseCount,
10760  VkDeviceSize bufferImageGranularity,
10761  VkDeviceSize allocSize,
10762  VkDeviceSize allocAlignment,
10763  bool upperAddress,
10764  VmaSuballocationType allocType,
10765  bool canMakeOtherLost,
10766  uint32_t strategy,
10767  VmaAllocationRequest* pAllocationRequest)
10768 {
10769  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10770 
10771  // Simple way to respect bufferImageGranularity. May be optimized some day.
10772  // Whenever the allocation might be an OPTIMAL image, enlarge alignment and size to the granularity:
10773  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10774  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10775  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10776  {
10777  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10778  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10779  }
10780 
10781  if(allocSize > m_UsableSize)
10782  {
10783  return false;
10784  }
10785 
10786  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10787  for(uint32_t level = targetLevel + 1; level--; )
10788  {
10789  for(Node* freeNode = m_FreeList[level].front;
10790  freeNode != VMA_NULL;
10791  freeNode = freeNode->free.next)
10792  {
10793  if(freeNode->offset % allocAlignment == 0)
10794  {
10795  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10796  pAllocationRequest->offset = freeNode->offset;
10797  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10798  pAllocationRequest->sumItemSize = 0;
10799  pAllocationRequest->itemsToMakeLostCount = 0;
10800  pAllocationRequest->customData = (void*)(uintptr_t)level;
10801  return true;
10802  }
10803  }
10804  }
10805 
10806  return false;
10807 }
10808 
10809 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10810  uint32_t currentFrameIndex,
10811  uint32_t frameInUseCount,
10812  VmaAllocationRequest* pAllocationRequest)
10813 {
10814  /*
10815  Lost allocations are not supported in buddy allocator at the moment.
10816  Support might be added in the future.
10817  */
10818  return pAllocationRequest->itemsToMakeLostCount == 0;
10819 }
10820 
10821 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10822 {
10823  /*
10824  Lost allocations are not supported in buddy allocator at the moment.
10825  Support might be added in the future.
10826  */
10827  return 0;
10828 }
10829 
10830 void VmaBlockMetadata_Buddy::Alloc(
10831  const VmaAllocationRequest& request,
10832  VmaSuballocationType type,
10833  VkDeviceSize allocSize,
10834  VmaAllocation hAllocation)
10835 {
10836  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10837 
10838  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10839  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10840 
10841  Node* currNode = m_FreeList[currLevel].front;
10842  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10843  while(currNode->offset != request.offset)
10844  {
10845  currNode = currNode->free.next;
10846  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10847  }
10848 
10849  // Go down, splitting free nodes.
10850  while(currLevel < targetLevel)
10851  {
10852  // currNode is already the first free node at currLevel.
10853  // Remove it from the list of free nodes at this level.
10854  RemoveFromFreeList(currLevel, currNode);
10855 
10856  const uint32_t childrenLevel = currLevel + 1;
10857 
10858  // Create two free sub-nodes.
10859  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10860  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10861 
10862  leftChild->offset = currNode->offset;
10863  leftChild->type = Node::TYPE_FREE;
10864  leftChild->parent = currNode;
10865  leftChild->buddy = rightChild;
10866 
10867  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10868  rightChild->type = Node::TYPE_FREE;
10869  rightChild->parent = currNode;
10870  rightChild->buddy = leftChild;
10871 
10872  // Convert currNode to split type.
10873  currNode->type = Node::TYPE_SPLIT;
10874  currNode->split.leftChild = leftChild;
10875 
10876  // Add child nodes to free list. Order is important!
10877  AddToFreeListFront(childrenLevel, rightChild);
10878  AddToFreeListFront(childrenLevel, leftChild);
10879 
10880  ++m_FreeCount;
10881  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10882  ++currLevel;
10883  currNode = m_FreeList[currLevel].front;
10884 
10885  /*
10886  We can be sure that currNode, as the left child of a node previously split,
10887  also fulfills the alignment requirement.
10888  */
10889  }
10890 
10891  // Remove from free list.
10892  VMA_ASSERT(currLevel == targetLevel &&
10893  currNode != VMA_NULL &&
10894  currNode->type == Node::TYPE_FREE);
10895  RemoveFromFreeList(currLevel, currNode);
10896 
10897  // Convert to allocation node.
10898  currNode->type = Node::TYPE_ALLOCATION;
10899  currNode->allocation.alloc = hAllocation;
10900 
10901  ++m_AllocationCount;
10902  --m_FreeCount;
10903  m_SumFreeSize -= allocSize;
10904 }
10905 
10906 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10907 {
10908  if(node->type == Node::TYPE_SPLIT)
10909  {
10910  DeleteNode(node->split.leftChild->buddy);
10911  DeleteNode(node->split.leftChild);
10912  }
10913 
10914  vma_delete(GetAllocationCallbacks(), node);
10915 }
10916 
10917 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10918 {
10919  VMA_VALIDATE(level < m_LevelCount);
10920  VMA_VALIDATE(curr->parent == parent);
10921  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10922  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10923  switch(curr->type)
10924  {
10925  case Node::TYPE_FREE:
10926  // curr->free.prev, next are validated separately.
10927  ctx.calculatedSumFreeSize += levelNodeSize;
10928  ++ctx.calculatedFreeCount;
10929  break;
10930  case Node::TYPE_ALLOCATION:
10931  ++ctx.calculatedAllocationCount;
10932  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10933  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10934  break;
10935  case Node::TYPE_SPLIT:
10936  {
10937  const uint32_t childrenLevel = level + 1;
10938  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10939  const Node* const leftChild = curr->split.leftChild;
10940  VMA_VALIDATE(leftChild != VMA_NULL);
10941  VMA_VALIDATE(leftChild->offset == curr->offset);
10942  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10943  {
10944  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10945  }
10946  const Node* const rightChild = leftChild->buddy;
10947  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10948  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10949  {
10950  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10951  }
10952  }
10953  break;
10954  default:
10955  return false;
10956  }
10957 
10958  return true;
10959 }
10960 
10961 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10962 {
10963  // This could be optimized e.g. by using std::bit_width (formerly proposed as std::log2p1) from C++20.
10964  uint32_t level = 0;
10965  VkDeviceSize currLevelNodeSize = m_UsableSize;
10966  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10967  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10968  {
10969  ++level;
10970  currLevelNodeSize = nextLevelNodeSize;
10971  nextLevelNodeSize = currLevelNodeSize >> 1;
10972  }
10973  return level;
10974 }
10975 
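// Worked example: with m_UsableSize = 256 MiB, AllocSizeToLevel(10 MiB) walks node
// sizes 256 -> 128 -> 64 -> 32 -> 16 MiB and stops before 8 MiB, where 10 MiB would
// no longer fit, returning level 4: the smallest node size still holding the request.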
10976 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10977 {
10978  // Find node and level.
10979  Node* node = m_Root;
10980  VkDeviceSize nodeOffset = 0;
10981  uint32_t level = 0;
10982  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10983  while(node->type == Node::TYPE_SPLIT)
10984  {
10985  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10986  if(offset < nodeOffset + nextLevelSize)
10987  {
10988  node = node->split.leftChild;
10989  }
10990  else
10991  {
10992  node = node->split.leftChild->buddy;
10993  nodeOffset += nextLevelSize;
10994  }
10995  ++level;
10996  levelNodeSize = nextLevelSize;
10997  }
10998 
10999  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11000  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11001 
11002  ++m_FreeCount;
11003  --m_AllocationCount;
11004  m_SumFreeSize += alloc->GetSize();
11005 
11006  node->type = Node::TYPE_FREE;
11007 
11008  // Join free nodes if possible.
11009  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11010  {
11011  RemoveFromFreeList(level, node->buddy);
11012  Node* const parent = node->parent;
11013 
11014  vma_delete(GetAllocationCallbacks(), node->buddy);
11015  vma_delete(GetAllocationCallbacks(), node);
11016  parent->type = Node::TYPE_FREE;
11017 
11018  node = parent;
11019  --level;
11020  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11021  --m_FreeCount;
11022  }
11023 
11024  AddToFreeListFront(level, node);
11025 }
11026 
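// Merge example: freeing the node [64..96) while its buddy [96..128) is already
// free removes the buddy from that level's free list, deletes both children, marks
// their parent [64..128) free, and retries one level up, so adjacent free halves
// coalesce until a used buddy (or the root) is reached.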
11027 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11028 {
11029  switch(node->type)
11030  {
11031  case Node::TYPE_FREE:
11032  ++outInfo.unusedRangeCount;
11033  outInfo.unusedBytes += levelNodeSize;
11034  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11035  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11036  break;
11037  case Node::TYPE_ALLOCATION:
11038  {
11039  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11040  ++outInfo.allocationCount;
11041  outInfo.usedBytes += allocSize;
11042  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11043  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11044 
11045  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11046  if(unusedRangeSize > 0)
11047  {
11048  ++outInfo.unusedRangeCount;
11049  outInfo.unusedBytes += unusedRangeSize;
11050  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11051  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11052  }
11053  }
11054  break;
11055  case Node::TYPE_SPLIT:
11056  {
11057  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11058  const Node* const leftChild = node->split.leftChild;
11059  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11060  const Node* const rightChild = leftChild->buddy;
11061  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11062  }
11063  break;
11064  default:
11065  VMA_ASSERT(0);
11066  }
11067 }
11068 
11069 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11070 {
11071  VMA_ASSERT(node->type == Node::TYPE_FREE);
11072 
11073  // List is empty.
11074  Node* const frontNode = m_FreeList[level].front;
11075  if(frontNode == VMA_NULL)
11076  {
11077  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11078  node->free.prev = node->free.next = VMA_NULL;
11079  m_FreeList[level].front = m_FreeList[level].back = node;
11080  }
11081  else
11082  {
11083  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11084  node->free.prev = VMA_NULL;
11085  node->free.next = frontNode;
11086  frontNode->free.prev = node;
11087  m_FreeList[level].front = node;
11088  }
11089 }
11090 
11091 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11092 {
11093  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11094 
11095  // It is at the front.
11096  if(node->free.prev == VMA_NULL)
11097  {
11098  VMA_ASSERT(m_FreeList[level].front == node);
11099  m_FreeList[level].front = node->free.next;
11100  }
11101  else
11102  {
11103  Node* const prevFreeNode = node->free.prev;
11104  VMA_ASSERT(prevFreeNode->free.next == node);
11105  prevFreeNode->free.next = node->free.next;
11106  }
11107 
11108  // It is at the back.
11109  if(node->free.next == VMA_NULL)
11110  {
11111  VMA_ASSERT(m_FreeList[level].back == node);
11112  m_FreeList[level].back = node->free.prev;
11113  }
11114  else
11115  {
11116  Node* const nextFreeNode = node->free.next;
11117  VMA_ASSERT(nextFreeNode->free.prev == node);
11118  nextFreeNode->free.prev = node->free.prev;
11119  }
11120 }
11121 
11122 #if VMA_STATS_STRING_ENABLED
11123 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11124 {
11125  switch(node->type)
11126  {
11127  case Node::TYPE_FREE:
11128  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11129  break;
11130  case Node::TYPE_ALLOCATION:
11131  {
11132  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11133  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11134  if(allocSize < levelNodeSize)
11135  {
11136  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11137  }
11138  }
11139  break;
11140  case Node::TYPE_SPLIT:
11141  {
11142  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11143  const Node* const leftChild = node->split.leftChild;
11144  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11145  const Node* const rightChild = leftChild->buddy;
11146  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11147  }
11148  break;
11149  default:
11150  VMA_ASSERT(0);
11151  }
11152 }
11153 #endif // #if VMA_STATS_STRING_ENABLED
11154 
11155 
11156 ////////////////////////////////////////////////////////////////////////////////
11157 // class VmaDeviceMemoryBlock
11158 
11159 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11160  m_pMetadata(VMA_NULL),
11161  m_MemoryTypeIndex(UINT32_MAX),
11162  m_Id(0),
11163  m_hMemory(VK_NULL_HANDLE),
11164  m_MapCount(0),
11165  m_pMappedData(VMA_NULL)
11166 {
11167 }
11168 
11169 void VmaDeviceMemoryBlock::Init(
11170  VmaAllocator hAllocator,
11171  VmaPool hParentPool,
11172  uint32_t newMemoryTypeIndex,
11173  VkDeviceMemory newMemory,
11174  VkDeviceSize newSize,
11175  uint32_t id,
11176  uint32_t algorithm)
11177 {
11178  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11179 
11180  m_hParentPool = hParentPool;
11181  m_MemoryTypeIndex = newMemoryTypeIndex;
11182  m_Id = id;
11183  m_hMemory = newMemory;
11184 
11185  switch(algorithm)
11186  {
11187  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11188  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11189  break;
11190  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11191  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11192  break;
11193  default:
11194  VMA_ASSERT(0);
11195  // Fall-through.
11196  case 0:
11197  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11198  }
11199  m_pMetadata->Init(newSize);
11200 }
11201 
11202 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11203 {
11204  // This is the most important assert in the entire library.
11205  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11206  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11207 
11208  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11209  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11210  m_hMemory = VK_NULL_HANDLE;
11211 
11212  vma_delete(allocator, m_pMetadata);
11213  m_pMetadata = VMA_NULL;
11214 }
11215 
11216 bool VmaDeviceMemoryBlock::Validate() const
11217 {
11218  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11219  (m_pMetadata->GetSize() != 0));
11220 
11221  return m_pMetadata->Validate();
11222 }
11223 
11224 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11225 {
11226  void* pData = nullptr;
11227  VkResult res = Map(hAllocator, 1, &pData);
11228  if(res != VK_SUCCESS)
11229  {
11230  return res;
11231  }
11232 
11233  res = m_pMetadata->CheckCorruption(pData);
11234 
11235  Unmap(hAllocator, 1);
11236 
11237  return res;
11238 }
11239 
11240 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11241 {
11242  if(count == 0)
11243  {
11244  return VK_SUCCESS;
11245  }
11246 
11247  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11248  if(m_MapCount != 0)
11249  {
11250  m_MapCount += count;
11251  VMA_ASSERT(m_pMappedData != VMA_NULL);
11252  if(ppData != VMA_NULL)
11253  {
11254  *ppData = m_pMappedData;
11255  }
11256  return VK_SUCCESS;
11257  }
11258  else
11259  {
11260  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11261  hAllocator->m_hDevice,
11262  m_hMemory,
11263  0, // offset
11264  VK_WHOLE_SIZE,
11265  0, // flags
11266  &m_pMappedData);
11267  if(result == VK_SUCCESS)
11268  {
11269  if(ppData != VMA_NULL)
11270  {
11271  *ppData = m_pMappedData;
11272  }
11273  m_MapCount = count;
11274  }
11275  return result;
11276  }
11277 }
11278 
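// Usage sketch of the reference-counted mapping (hypothetical call sequence):
//   void* p1; block.Map(hAllocator, 1, &p1);   // vkMapMemory is called, m_MapCount = 1
//   void* p2; block.Map(hAllocator, 1, &p2);   // mapping reused: p2 == p1, m_MapCount = 2
//   block.Unmap(hAllocator, 1);                // m_MapCount = 1, memory stays mapped
//   block.Unmap(hAllocator, 1);                // m_MapCount = 0, vkUnmapMemory is called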
11279 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11280 {
11281  if(count == 0)
11282  {
11283  return;
11284  }
11285 
11286  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11287  if(m_MapCount >= count)
11288  {
11289  m_MapCount -= count;
11290  if(m_MapCount == 0)
11291  {
11292  m_pMappedData = VMA_NULL;
11293  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11294  }
11295  }
11296  else
11297  {
11298  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11299  }
11300 }
11301 
11302 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11303 {
11304  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11305  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11306 
11307  void* pData;
11308  VkResult res = Map(hAllocator, 1, &pData);
11309  if(res != VK_SUCCESS)
11310  {
11311  return res;
11312  }
11313 
11314  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11315  VmaWriteMagicValue(pData, allocOffset + allocSize);
11316 
11317  Unmap(hAllocator, 1);
11318 
11319  return VK_SUCCESS;
11320 }
11321 
11322 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11323 {
11324  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11325  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11326 
11327  void* pData;
11328  VkResult res = Map(hAllocator, 1, &pData);
11329  if(res != VK_SUCCESS)
11330  {
11331  return res;
11332  }
11333 
11334  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11335  {
11336  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11337  }
11338  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11339  {
11340  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11341  }
11342 
11343  Unmap(hAllocator, 1);
11344 
11345  return VK_SUCCESS;
11346 }
11347 
11348 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11349  const VmaAllocator hAllocator,
11350  const VmaAllocation hAllocation,
11351  VkBuffer hBuffer)
11352 {
11353  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11354  hAllocation->GetBlock() == this);
11355  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11356  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11357  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11358  hAllocator->m_hDevice,
11359  hBuffer,
11360  m_hMemory,
11361  hAllocation->GetOffset());
11362 }
11363 
11364 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11365  const VmaAllocator hAllocator,
11366  const VmaAllocation hAllocation,
11367  VkImage hImage)
11368 {
11369  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11370  hAllocation->GetBlock() == this);
11371  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11372  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11373  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11374  hAllocator->m_hDevice,
11375  hImage,
11376  m_hMemory,
11377  hAllocation->GetOffset());
11378 }
11379 
11380 static void InitStatInfo(VmaStatInfo& outInfo)
11381 {
11382  memset(&outInfo, 0, sizeof(outInfo));
11383  outInfo.allocationSizeMin = UINT64_MAX;
11384  outInfo.unusedRangeSizeMin = UINT64_MAX;
11385 }
11386 
11387 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11388 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11389 {
11390  inoutInfo.blockCount += srcInfo.blockCount;
11391  inoutInfo.allocationCount += srcInfo.allocationCount;
11392  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11393  inoutInfo.usedBytes += srcInfo.usedBytes;
11394  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11395  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11396  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11397  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11398  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11399 }
11400 
11401 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11402 {
11403  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11404  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11405  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11406  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11407 }
11408 
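// Aggregation sketch showing how these helpers compose (non-normative):
//   VmaStatInfo total;
//   InitStatInfo(total);               // zeroes counters; min fields start at UINT64_MAX
//   VmaStatInfo blockInfo;
//   // ...fill blockInfo per block, e.g. via CalcAllocationStatInfo()...
//   VmaAddStatInfo(total, blockInfo);  // accumulate sums and min/max
//   VmaPostprocessCalcStatInfo(total); // derive allocationSizeAvg / unusedRangeSizeAvg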
11409 VmaPool_T::VmaPool_T(
11410  VmaAllocator hAllocator,
11411  const VmaPoolCreateInfo& createInfo,
11412  VkDeviceSize preferredBlockSize) :
11413  m_BlockVector(
11414  hAllocator,
11415  this, // hParentPool
11416  createInfo.memoryTypeIndex,
11417  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11418  createInfo.minBlockCount,
11419  createInfo.maxBlockCount,
11420  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11421  createInfo.frameInUseCount,
11422  true, // isCustomPool
11423  createInfo.blockSize != 0, // explicitBlockSize
11424  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11425  m_Id(0)
11426 {
11427 }
11428 
11429 VmaPool_T::~VmaPool_T()
11430 {
11431 }
11432 
11433 #if VMA_STATS_STRING_ENABLED
11434 
11435 #endif // #if VMA_STATS_STRING_ENABLED
11436 
11437 VmaBlockVector::VmaBlockVector(
11438  VmaAllocator hAllocator,
11439  VmaPool hParentPool,
11440  uint32_t memoryTypeIndex,
11441  VkDeviceSize preferredBlockSize,
11442  size_t minBlockCount,
11443  size_t maxBlockCount,
11444  VkDeviceSize bufferImageGranularity,
11445  uint32_t frameInUseCount,
11446  bool isCustomPool,
11447  bool explicitBlockSize,
11448  uint32_t algorithm) :
11449  m_hAllocator(hAllocator),
11450  m_hParentPool(hParentPool),
11451  m_MemoryTypeIndex(memoryTypeIndex),
11452  m_PreferredBlockSize(preferredBlockSize),
11453  m_MinBlockCount(minBlockCount),
11454  m_MaxBlockCount(maxBlockCount),
11455  m_BufferImageGranularity(bufferImageGranularity),
11456  m_FrameInUseCount(frameInUseCount),
11457  m_IsCustomPool(isCustomPool),
11458  m_ExplicitBlockSize(explicitBlockSize),
11459  m_Algorithm(algorithm),
11460  m_HasEmptyBlock(false),
11461  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11462  m_NextBlockId(0)
11463 {
11464 }
11465 
11466 VmaBlockVector::~VmaBlockVector()
11467 {
11468  for(size_t i = m_Blocks.size(); i--; )
11469  {
11470  m_Blocks[i]->Destroy(m_hAllocator);
11471  vma_delete(m_hAllocator, m_Blocks[i]);
11472  }
11473 }
11474 
11475 VkResult VmaBlockVector::CreateMinBlocks()
11476 {
11477  for(size_t i = 0; i < m_MinBlockCount; ++i)
11478  {
11479  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11480  if(res != VK_SUCCESS)
11481  {
11482  return res;
11483  }
11484  }
11485  return VK_SUCCESS;
11486 }
11487 
11488 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11489 {
11490  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11491 
11492  const size_t blockCount = m_Blocks.size();
11493 
11494  pStats->size = 0;
11495  pStats->unusedSize = 0;
11496  pStats->allocationCount = 0;
11497  pStats->unusedRangeCount = 0;
11498  pStats->unusedRangeSizeMax = 0;
11499  pStats->blockCount = blockCount;
11500 
11501  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11502  {
11503  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11504  VMA_ASSERT(pBlock);
11505  VMA_HEAVY_ASSERT(pBlock->Validate());
11506  pBlock->m_pMetadata->AddPoolStats(*pStats);
11507  }
11508 }
11509 
11510 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11511 {
11512  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11513  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11514  (VMA_DEBUG_MARGIN > 0) &&
11515  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11516  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11517 }
11518 
11519 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11520 
11521 VkResult VmaBlockVector::Allocate(
11522  uint32_t currentFrameIndex,
11523  VkDeviceSize size,
11524  VkDeviceSize alignment,
11525  const VmaAllocationCreateInfo& createInfo,
11526  VmaSuballocationType suballocType,
11527  size_t allocationCount,
11528  VmaAllocation* pAllocations)
11529 {
11530  size_t allocIndex;
11531  VkResult res = VK_SUCCESS;
11532 
11533  if(IsCorruptionDetectionEnabled())
11534  {
11535  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11536  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11537  }
11538 
11539  {
11540  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11541  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11542  {
11543  res = AllocatePage(
11544  currentFrameIndex,
11545  size,
11546  alignment,
11547  createInfo,
11548  suballocType,
11549  pAllocations + allocIndex);
11550  if(res != VK_SUCCESS)
11551  {
11552  break;
11553  }
11554  }
11555  }
11556 
11557  if(res != VK_SUCCESS)
11558  {
11559  // Free all already created allocations.
11560  while(allocIndex--)
11561  {
11562  Free(pAllocations[allocIndex]);
11563  }
11564  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11565  }
11566 
11567  return res;
11568 }
11569 
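// Usage sketch (non-normative): multi-page allocation is all-or-nothing.
//   VmaAllocation allocs[4] = {};
//   VkResult res = blockVector.Allocate(frameIndex, size, alignment, createInfo,
//       suballocType, 4, allocs);
//   // If e.g. the 3rd page fails, the pages already created are freed again and
//   // all four handles are zeroed before the error code is returned.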
11570 VkResult VmaBlockVector::AllocatePage(
11571  uint32_t currentFrameIndex,
11572  VkDeviceSize size,
11573  VkDeviceSize alignment,
11574  const VmaAllocationCreateInfo& createInfo,
11575  VmaSuballocationType suballocType,
11576  VmaAllocation* pAllocation)
11577 {
11578  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11579  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11580  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11581  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11582  const bool canCreateNewBlock =
11583  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11584  (m_Blocks.size() < m_MaxBlockCount);
11585  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11586 
11587  // If linear algorithm is used, canMakeOtherLost is available only when used as ring buffer,
11588  // which in turn is available only when maxBlockCount = 1.
11589  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11590  {
11591  canMakeOtherLost = false;
11592  }
11593 
11594  // Upper address can only be used with the linear allocator and within a single memory block.
11595  if(isUpperAddress &&
11596  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11597  {
11598  return VK_ERROR_FEATURE_NOT_PRESENT;
11599  }
11600 
11601  // Validate strategy.
11602  switch(strategy)
11603  {
11604  case 0:
11605  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11606  break;
11607  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11608  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11609  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11610  break;
11611  default:
11612  return VK_ERROR_FEATURE_NOT_PRESENT;
11613  }
11614 
11615  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11616  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11617  {
11618  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11619  }
11620 
11621  /*
11622  Under certain conditions, this whole section can be skipped for optimization, so
11623  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11624  e.g. for custom pools with linear algorithm.
11625  */
11626  if(!canMakeOtherLost || canCreateNewBlock)
11627  {
11628  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11629  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11630  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11631 
11632  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11633  {
11634  // Use only last block.
11635  if(!m_Blocks.empty())
11636  {
11637  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11638  VMA_ASSERT(pCurrBlock);
11639  VkResult res = AllocateFromBlock(
11640  pCurrBlock,
11641  currentFrameIndex,
11642  size,
11643  alignment,
11644  allocFlagsCopy,
11645  createInfo.pUserData,
11646  suballocType,
11647  strategy,
11648  pAllocation);
11649  if(res == VK_SUCCESS)
11650  {
11651  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11652  return VK_SUCCESS;
11653  }
11654  }
11655  }
11656  else
11657  {
11658  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11659  {
11660  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11661  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11662  {
11663  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11664  VMA_ASSERT(pCurrBlock);
11665  VkResult res = AllocateFromBlock(
11666  pCurrBlock,
11667  currentFrameIndex,
11668  size,
11669  alignment,
11670  allocFlagsCopy,
11671  createInfo.pUserData,
11672  suballocType,
11673  strategy,
11674  pAllocation);
11675  if(res == VK_SUCCESS)
11676  {
11677  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11678  return VK_SUCCESS;
11679  }
11680  }
11681  }
11682  else // WORST_FIT, FIRST_FIT
11683  {
11684  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11685  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11686  {
11687  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11688  VMA_ASSERT(pCurrBlock);
11689  VkResult res = AllocateFromBlock(
11690  pCurrBlock,
11691  currentFrameIndex,
11692  size,
11693  alignment,
11694  allocFlagsCopy,
11695  createInfo.pUserData,
11696  suballocType,
11697  strategy,
11698  pAllocation);
11699  if(res == VK_SUCCESS)
11700  {
11701  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11702  return VK_SUCCESS;
11703  }
11704  }
11705  }
11706  }
11707 
11708  // 2. Try to create new block.
11709  if(canCreateNewBlock)
11710  {
11711  // Calculate optimal size for new block.
11712  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11713  uint32_t newBlockSizeShift = 0;
11714  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11715 
11716  if(!m_ExplicitBlockSize)
11717  {
11718  // Allocate 1/8, 1/4, 1/2 as first blocks.
11719  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11720  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11721  {
11722  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11723  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11724  {
11725  newBlockSize = smallerNewBlockSize;
11726  ++newBlockSizeShift;
11727  }
11728  else
11729  {
11730  break;
11731  }
11732  }
11733  }
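// Worked example of the sizing loop above (illustrative numbers only): with
// m_PreferredBlockSize = 256 MiB, no existing blocks (maxExistingBlockSize = 0)
// and a requested size of 10 MiB, the loop halves three times: 128 MiB, 64 MiB,
// then 32 MiB all satisfy "> 0" and ">= 20 MiB", so the first block is created
// at 32 MiB = 1/8 of the preferred size. Once that block exists, the next new
// block stops halving at 64 MiB, the one after at 128 MiB, and so on - blocks
// grow until they reach the preferred size.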
11734 
11735  size_t newBlockIndex = 0;
11736  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11737  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11738  if(!m_ExplicitBlockSize)
11739  {
11740  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11741  {
11742  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11743  if(smallerNewBlockSize >= size)
11744  {
11745  newBlockSize = smallerNewBlockSize;
11746  ++newBlockSizeShift;
11747  res = CreateBlock(newBlockSize, &newBlockIndex);
11748  }
11749  else
11750  {
11751  break;
11752  }
11753  }
11754  }
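// Note that newBlockSizeShift carries over from the sizing loop above, so at
// most NEW_BLOCK_SIZE_SHIFT_MAX halvings happen in total across both loops.
// E.g. if a 256 MiB block fails in vkAllocateMemory, 128 MiB, 64 MiB and
// 32 MiB are tried in turn (never going below the requested size) before
// falling through to the canMakeOtherLost path below.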
11755 
11756  if(res == VK_SUCCESS)
11757  {
11758  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11759  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11760 
11761  res = AllocateFromBlock(
11762  pBlock,
11763  currentFrameIndex,
11764  size,
11765  alignment,
11766  allocFlagsCopy,
11767  createInfo.pUserData,
11768  suballocType,
11769  strategy,
11770  pAllocation);
11771  if(res == VK_SUCCESS)
11772  {
11773  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11774  return VK_SUCCESS;
11775  }
11776  else
11777  {
11778  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11780  }
11781  }
11782  }
11783  }
11784 
11785  // 3. Try to allocate from existing blocks, making other allocations lost.
11786  if(canMakeOtherLost)
11787  {
11788  uint32_t tryIndex = 0;
11789  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11790  {
11791  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11792  VmaAllocationRequest bestRequest = {};
11793  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11794 
11795  // 1. Search existing allocations.
11796  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11797  {
11798  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11799  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11800  {
11801  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11802  VMA_ASSERT(pCurrBlock);
11803  VmaAllocationRequest currRequest = {};
11804  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11805  currentFrameIndex,
11806  m_FrameInUseCount,
11807  m_BufferImageGranularity,
11808  size,
11809  alignment,
11810  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11811  suballocType,
11812  canMakeOtherLost,
11813  strategy,
11814  &currRequest))
11815  {
11816  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11817  if(pBestRequestBlock == VMA_NULL ||
11818  currRequestCost < bestRequestCost)
11819  {
11820  pBestRequestBlock = pCurrBlock;
11821  bestRequest = currRequest;
11822  bestRequestCost = currRequestCost;
11823 
11824  if(bestRequestCost == 0)
11825  {
11826  break;
11827  }
11828  }
11829  }
11830  }
11831  }
11832  else // WORST_FIT, FIRST_FIT
11833  {
11834  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11835  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11836  {
11837  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11838  VMA_ASSERT(pCurrBlock);
11839  VmaAllocationRequest currRequest = {};
11840  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11841  currentFrameIndex,
11842  m_FrameInUseCount,
11843  m_BufferImageGranularity,
11844  size,
11845  alignment,
11846  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11847  suballocType,
11848  canMakeOtherLost,
11849  strategy,
11850  &currRequest))
11851  {
11852  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11853  if(pBestRequestBlock == VMA_NULL ||
11854  currRequestCost < bestRequestCost ||
11855  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11856  {
11857  pBestRequestBlock = pCurrBlock;
11858  bestRequest = currRequest;
11859  bestRequestCost = currRequestCost;
11860 
11861  if(bestRequestCost == 0 ||
11862  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11863  {
11864  break;
11865  }
11866  }
11867  }
11868  }
11869  }
11870 
11871  if(pBestRequestBlock != VMA_NULL)
11872  {
11873  if(mapped)
11874  {
11875  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11876  if(res != VK_SUCCESS)
11877  {
11878  return res;
11879  }
11880  }
11881 
11882  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11883  currentFrameIndex,
11884  m_FrameInUseCount,
11885  &bestRequest))
11886  {
11887  // We no longer have an empty Allocation.
11888  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11889  {
11890  m_HasEmptyBlock = false;
11891  }
11892  // Allocate from this pBlock.
11893  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11894  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11895  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11896  (*pAllocation)->InitBlockAllocation(
11897  pBestRequestBlock,
11898  bestRequest.offset,
11899  alignment,
11900  size,
11901  suballocType,
11902  mapped,
11903  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11904  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11905  VMA_DEBUG_LOG(" Returned from existing block");
11906  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11907  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11908  {
11909  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11910  }
11911  if(IsCorruptionDetectionEnabled())
11912  {
11913  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11914  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11915  }
11916  return VK_SUCCESS;
11917  }
11918  // else: Some allocations must have been touched while we are here. Next try.
11919  }
11920  else
11921  {
11922  // Could not find place in any of the blocks - break outer loop.
11923  break;
11924  }
11925  }
11926  /* Maximum number of tries exceeded - a very unlikely event when many other
11927  threads are simultaneously touching allocations, making it impossible to mark
11928  them as lost at the same time as we try to allocate. */
11929  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11930  {
11931  return VK_ERROR_TOO_MANY_OBJECTS;
11932  }
11933  }
11934 
11935  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11936 }
11937 
11938 void VmaBlockVector::Free(
11939  VmaAllocation hAllocation)
11940 {
11941  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11942 
11943  // Scope for lock.
11944  {
11945  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11946 
11947  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11948 
11949  if(IsCorruptionDetectionEnabled())
11950  {
11951  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11952  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11953  }
11954 
11955  if(hAllocation->IsPersistentMap())
11956  {
11957  pBlock->Unmap(m_hAllocator, 1);
11958  }
11959 
11960  pBlock->m_pMetadata->Free(hAllocation);
11961  VMA_HEAVY_ASSERT(pBlock->Validate());
11962 
11963  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11964 
11965  // pBlock became empty after this deallocation.
11966  if(pBlock->m_pMetadata->IsEmpty())
11967  {
11968  // Already has empty Allocation. We don't want to have two, so delete this one.
11969  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11970  {
11971  pBlockToDelete = pBlock;
11972  Remove(pBlock);
11973  }
11974  // We now have our first empty block.
11975  else
11976  {
11977  m_HasEmptyBlock = true;
11978  }
11979  }
11980  // pBlock didn't become empty, but we have another empty block - find and free that one.
11981  // (This is an optional heuristic.)
11982  else if(m_HasEmptyBlock)
11983  {
11984  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11985  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11986  {
11987  pBlockToDelete = pLastBlock;
11988  m_Blocks.pop_back();
11989  m_HasEmptyBlock = false;
11990  }
11991  }
11992 
11993  IncrementallySortBlocks();
11994  }
11995 
11996  // Destruction of a free Allocation. Deferred until this point, outside of the mutex
11997  // lock, for performance reasons.
11998  if(pBlockToDelete != VMA_NULL)
11999  {
12000  VMA_DEBUG_LOG(" Deleted empty allocation");
12001  pBlockToDelete->Destroy(m_hAllocator);
12002  vma_delete(m_hAllocator, pBlockToDelete);
12003  }
12004 }
12005 
12006 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12007 {
12008  VkDeviceSize result = 0;
12009  for(size_t i = m_Blocks.size(); i--; )
12010  {
12011  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12012  if(result >= m_PreferredBlockSize)
12013  {
12014  break;
12015  }
12016  }
12017  return result;
12018 }
12019 
12020 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12021 {
12022  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12023  {
12024  if(m_Blocks[blockIndex] == pBlock)
12025  {
12026  VmaVectorRemove(m_Blocks, blockIndex);
12027  return;
12028  }
12029  }
12030  VMA_ASSERT(0);
12031 }
12032 
12033 void VmaBlockVector::IncrementallySortBlocks()
12034 {
12035  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12036  {
12037  // Bubble sort only until first swap.
12038  for(size_t i = 1; i < m_Blocks.size(); ++i)
12039  {
12040  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12041  {
12042  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12043  return;
12044  }
12045  }
12046  }
12047 }
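// This performs at most one adjacent swap per call instead of a full sort.
// Because a single allocation or free changes the free size of only one
// block, invoking this after every such operation keeps m_Blocks
// approximately sorted by ascending free space - the order the best-fit
// forward scan above relies on.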
12048 
12049 VkResult VmaBlockVector::AllocateFromBlock(
12050  VmaDeviceMemoryBlock* pBlock,
12051  uint32_t currentFrameIndex,
12052  VkDeviceSize size,
12053  VkDeviceSize alignment,
12054  VmaAllocationCreateFlags allocFlags,
12055  void* pUserData,
12056  VmaSuballocationType suballocType,
12057  uint32_t strategy,
12058  VmaAllocation* pAllocation)
12059 {
12060  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12061  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12062  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12063  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12064 
12065  VmaAllocationRequest currRequest = {};
12066  if(pBlock->m_pMetadata->CreateAllocationRequest(
12067  currentFrameIndex,
12068  m_FrameInUseCount,
12069  m_BufferImageGranularity,
12070  size,
12071  alignment,
12072  isUpperAddress,
12073  suballocType,
12074  false, // canMakeOtherLost
12075  strategy,
12076  &currRequest))
12077  {
12078  // Allocate from pBlock.
12079  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12080 
12081  if(mapped)
12082  {
12083  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12084  if(res != VK_SUCCESS)
12085  {
12086  return res;
12087  }
12088  }
12089 
12090  // We no longer have an empty Allocation.
12091  if(pBlock->m_pMetadata->IsEmpty())
12092  {
12093  m_HasEmptyBlock = false;
12094  }
12095 
12096  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12097  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12098  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12099  (*pAllocation)->InitBlockAllocation(
12100  pBlock,
12101  currRequest.offset,
12102  alignment,
12103  size,
12104  suballocType,
12105  mapped,
12106  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12107  VMA_HEAVY_ASSERT(pBlock->Validate());
12108  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12109  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12110  {
12111  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12112  }
12113  if(IsCorruptionDetectionEnabled())
12114  {
12115  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12116  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12117  }
12118  return VK_SUCCESS;
12119  }
12120  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12121 }
12122 
12123 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12124 {
12125  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12126  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12127  allocInfo.allocationSize = blockSize;
12128  VkDeviceMemory mem = VK_NULL_HANDLE;
12129  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12130  if(res < 0)
12131  {
12132  return res;
12133  }
12134 
12135  // New VkDeviceMemory successfully created.
12136 
12137  // Create new Allocation for it.
12138  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12139  pBlock->Init(
12140  m_hAllocator,
12141  m_hParentPool,
12142  m_MemoryTypeIndex,
12143  mem,
12144  allocInfo.allocationSize,
12145  m_NextBlockId++,
12146  m_Algorithm);
12147 
12148  m_Blocks.push_back(pBlock);
12149  if(pNewBlockIndex != VMA_NULL)
12150  {
12151  *pNewBlockIndex = m_Blocks.size() - 1;
12152  }
12153 
12154  return VK_SUCCESS;
12155 }
12156 
12157 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12158  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12159  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12160 {
12161  const size_t blockCount = m_Blocks.size();
12162  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12163 
12164  enum BLOCK_FLAG
12165  {
12166  BLOCK_FLAG_USED = 0x00000001,
12167  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12168  };
12169 
12170  struct BlockInfo
12171  {
12172  uint32_t flags;
12173  void* pMappedData;
12174  };
12175  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12176  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12177  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12178 
12179  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12180  const size_t moveCount = moves.size();
12181  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12182  {
12183  const VmaDefragmentationMove& move = moves[moveIndex];
12184  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12185  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12186  }
12187 
12188  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12189 
12190  // Go over all blocks. Get mapped pointer or map if necessary.
12191  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12192  {
12193  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12194  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12195  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12196  {
12197  currBlockInfo.pMappedData = pBlock->GetMappedData();
12198  // It is not originally mapped - map it.
12199  if(currBlockInfo.pMappedData == VMA_NULL)
12200  {
12201  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12202  if(pDefragCtx->res == VK_SUCCESS)
12203  {
12204  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12205  }
12206  }
12207  }
12208  }
12209 
12210  // Go over all moves. Do actual data transfer.
12211  if(pDefragCtx->res == VK_SUCCESS)
12212  {
12213  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12214  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12215 
12216  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12217  {
12218  const VmaDefragmentationMove& move = moves[moveIndex];
12219 
12220  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12221  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12222 
12223  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12224 
12225  // Invalidate source.
12226  if(isNonCoherent)
12227  {
12228  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12229  memRange.memory = pSrcBlock->GetDeviceMemory();
12230  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12231  memRange.size = VMA_MIN(
12232  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12233  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12234  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12235  }
12236 
12237  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12238  memmove(
12239  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12240  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12241  static_cast<size_t>(move.size));
12242 
12243  if(IsCorruptionDetectionEnabled())
12244  {
12245  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12246  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12247  }
12248 
12249  // Flush destination.
12250  if(isNonCoherent)
12251  {
12252  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12253  memRange.memory = pDstBlock->GetDeviceMemory();
12254  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12255  memRange.size = VMA_MIN(
12256  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12257  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12258  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12259  }
12260  }
12261  }
12262 
12263  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12264  // This happens regardless of whether pCtx->res == VK_SUCCESS.
12265  for(size_t blockIndex = blockCount; blockIndex--; )
12266  {
12267  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12268  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12269  {
12270  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12271  pBlock->Unmap(m_hAllocator, 1);
12272  }
12273  }
12274 }
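// Illustration of the non-coherent range clamping above (made-up numbers):
// with nonCoherentAtomSize = 64, move.srcOffset = 100 and move.size = 50,
// memRange.offset = VmaAlignDown(100, 64) = 64 and memRange.size =
// VmaAlignUp(50 + (100 - 64), 64) = VmaAlignUp(86, 64) = 128, further clamped
// so the range never extends past the end of the block - the form that
// vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges require for
// non-coherent memory.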
12275 
12276 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12277  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12278  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12279  VkCommandBuffer commandBuffer)
12280 {
12281  const size_t blockCount = m_Blocks.size();
12282 
12283  pDefragCtx->blockContexts.resize(blockCount);
12284  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12285 
12286  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12287  const size_t moveCount = moves.size();
12288  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12289  {
12290  const VmaDefragmentationMove& move = moves[moveIndex];
12291  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12292  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12293  }
12294 
12295  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12296 
12297  // Go over all blocks. Create and bind buffer for whole block if necessary.
12298  {
12299  VkBufferCreateInfo bufCreateInfo;
12300  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12301 
12302  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12303  {
12304  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12305  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12306  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12307  {
12308  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12309  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12310  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12311  if(pDefragCtx->res == VK_SUCCESS)
12312  {
12313  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12314  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12315  }
12316  }
12317  }
12318  }
12319 
12320  // Go over all moves. Post data transfer commands to command buffer.
12321  if(pDefragCtx->res == VK_SUCCESS)
12322  {
12323  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12324  {
12325  const VmaDefragmentationMove& move = moves[moveIndex];
12326 
12327  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12328  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12329 
12330  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12331 
12332  VkBufferCopy region = {
12333  move.srcOffset,
12334  move.dstOffset,
12335  move.size };
12336  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12337  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12338  }
12339  }
12340 
12341  // Save buffers to defrag context for later destruction.
12342  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12343  {
12344  pDefragCtx->res = VK_NOT_READY;
12345  }
12346 }
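// vkCmdCopyBuffer operates on VkBuffer objects rather than raw VkDeviceMemory,
// which is why a temporary buffer spanning the whole block is created and
// bound to every block touched by a move. Setting res to VK_NOT_READY here
// signals that the recorded command buffer still has to be submitted and
// finish executing before DefragmentationEnd() destroys those buffers.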
12347 
12348 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12349 {
12350  m_HasEmptyBlock = false;
12351  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12352  {
12353  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12354  if(pBlock->m_pMetadata->IsEmpty())
12355  {
12356  if(m_Blocks.size() > m_MinBlockCount)
12357  {
12358  if(pDefragmentationStats != VMA_NULL)
12359  {
12360  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12361  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12362  }
12363 
12364  VmaVectorRemove(m_Blocks, blockIndex);
12365  pBlock->Destroy(m_hAllocator);
12366  vma_delete(m_hAllocator, pBlock);
12367  }
12368  else
12369  {
12370  m_HasEmptyBlock = true;
12371  }
12372  }
12373  }
12374 }
12375 
12376 #if VMA_STATS_STRING_ENABLED
12377 
12378 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12379 {
12380  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12381 
12382  json.BeginObject();
12383 
12384  if(m_IsCustomPool)
12385  {
12386  json.WriteString("MemoryTypeIndex");
12387  json.WriteNumber(m_MemoryTypeIndex);
12388 
12389  json.WriteString("BlockSize");
12390  json.WriteNumber(m_PreferredBlockSize);
12391 
12392  json.WriteString("BlockCount");
12393  json.BeginObject(true);
12394  if(m_MinBlockCount > 0)
12395  {
12396  json.WriteString("Min");
12397  json.WriteNumber((uint64_t)m_MinBlockCount);
12398  }
12399  if(m_MaxBlockCount < SIZE_MAX)
12400  {
12401  json.WriteString("Max");
12402  json.WriteNumber((uint64_t)m_MaxBlockCount);
12403  }
12404  json.WriteString("Cur");
12405  json.WriteNumber((uint64_t)m_Blocks.size());
12406  json.EndObject();
12407 
12408  if(m_FrameInUseCount > 0)
12409  {
12410  json.WriteString("FrameInUseCount");
12411  json.WriteNumber(m_FrameInUseCount);
12412  }
12413 
12414  if(m_Algorithm != 0)
12415  {
12416  json.WriteString("Algorithm");
12417  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12418  }
12419  }
12420  else
12421  {
12422  json.WriteString("PreferredBlockSize");
12423  json.WriteNumber(m_PreferredBlockSize);
12424  }
12425 
12426  json.WriteString("Blocks");
12427  json.BeginObject();
12428  for(size_t i = 0; i < m_Blocks.size(); ++i)
12429  {
12430  json.BeginString();
12431  json.ContinueString(m_Blocks[i]->GetId());
12432  json.EndString();
12433 
12434  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12435  }
12436  json.EndObject();
12437 
12438  json.EndObject();
12439 }
12440 
12441 #endif // #if VMA_STATS_STRING_ENABLED
12442 
12443 void VmaBlockVector::Defragment(
12444  class VmaBlockVectorDefragmentationContext* pCtx,
12445  VmaDefragmentationStats* pStats,
12446  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12447  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12448  VkCommandBuffer commandBuffer)
12449 {
12450  pCtx->res = VK_SUCCESS;
12451 
12452  const VkMemoryPropertyFlags memPropFlags =
12453  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12454  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12455 
12456  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12457  isHostVisible;
12458  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12459  !IsCorruptionDetectionEnabled() &&
12460  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12461 
12462  // There are options to defragment this memory type.
12463  if(canDefragmentOnCpu || canDefragmentOnGpu)
12464  {
12465  bool defragmentOnGpu;
12466  // There is only one option to defragment this memory type.
12467  if(canDefragmentOnGpu != canDefragmentOnCpu)
12468  {
12469  defragmentOnGpu = canDefragmentOnGpu;
12470  }
12471  // Both options are available: use heuristics to choose the better one.
12472  else
12473  {
12474  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12475  m_hAllocator->IsIntegratedGpu();
12476  }
12477 
12478  bool overlappingMoveSupported = !defragmentOnGpu;
12479 
12480  if(m_hAllocator->m_UseMutex)
12481  {
12482  m_Mutex.LockWrite();
12483  pCtx->mutexLocked = true;
12484  }
12485 
12486  pCtx->Begin(overlappingMoveSupported);
12487 
12488  // Defragment.
12489 
12490  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12491  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12492  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12493  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12494  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12495 
12496  // Accumulate statistics.
12497  if(pStats != VMA_NULL)
12498  {
12499  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12500  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12501  pStats->bytesMoved += bytesMoved;
12502  pStats->allocationsMoved += allocationsMoved;
12503  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12504  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12505  if(defragmentOnGpu)
12506  {
12507  maxGpuBytesToMove -= bytesMoved;
12508  maxGpuAllocationsToMove -= allocationsMoved;
12509  }
12510  else
12511  {
12512  maxCpuBytesToMove -= bytesMoved;
12513  maxCpuAllocationsToMove -= allocationsMoved;
12514  }
12515  }
12516 
12517  if(pCtx->res >= VK_SUCCESS)
12518  {
12519  if(defragmentOnGpu)
12520  {
12521  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12522  }
12523  else
12524  {
12525  ApplyDefragmentationMovesCpu(pCtx, moves);
12526  }
12527  }
12528  }
12529 }
12530 
12531 void VmaBlockVector::DefragmentationEnd(
12532  class VmaBlockVectorDefragmentationContext* pCtx,
12533  VmaDefragmentationStats* pStats)
12534 {
12535  // Destroy buffers.
12536  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12537  {
12538  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12539  if(blockCtx.hBuffer)
12540  {
12541  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12542  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12543  }
12544  }
12545 
12546  if(pCtx->res >= VK_SUCCESS)
12547  {
12548  FreeEmptyBlocks(pStats);
12549  }
12550 
12551  if(pCtx->mutexLocked)
12552  {
12553  VMA_ASSERT(m_hAllocator->m_UseMutex);
12554  m_Mutex.UnlockWrite();
12555  }
12556 }
12557 
12558 size_t VmaBlockVector::CalcAllocationCount() const
12559 {
12560  size_t result = 0;
12561  for(size_t i = 0; i < m_Blocks.size(); ++i)
12562  {
12563  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12564  }
12565  return result;
12566 }
12567 
12568 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12569 {
12570  if(m_BufferImageGranularity == 1)
12571  {
12572  return false;
12573  }
12574  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12575  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12576  {
12577  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12578  VMA_ASSERT(m_Algorithm == 0);
12579  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12580  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12581  {
12582  return true;
12583  }
12584  }
12585  return false;
12586 }
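// Sketch of the conflict being tested for (example values only): with
// bufferImageGranularity = 4096, a linear buffer ending at offset 4000 and an
// optimally-tiled image starting at offset 4096 share no granularity "page",
// but the same image starting at offset 4064 would share page 0 with the
// buffer, which the Vulkan spec forbids. The Fast defragmentation algorithm
// packs suballocations tightly, so it is only chosen when no such conflict
// can possibly arise.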
12587 
12588 void VmaBlockVector::MakePoolAllocationsLost(
12589  uint32_t currentFrameIndex,
12590  size_t* pLostAllocationCount)
12591 {
12592  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12593  size_t lostAllocationCount = 0;
12594  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12595  {
12596  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12597  VMA_ASSERT(pBlock);
12598  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12599  }
12600  if(pLostAllocationCount != VMA_NULL)
12601  {
12602  *pLostAllocationCount = lostAllocationCount;
12603  }
12604 }
12605 
12606 VkResult VmaBlockVector::CheckCorruption()
12607 {
12608  if(!IsCorruptionDetectionEnabled())
12609  {
12610  return VK_ERROR_FEATURE_NOT_PRESENT;
12611  }
12612 
12613  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12614  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12615  {
12616  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12617  VMA_ASSERT(pBlock);
12618  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12619  if(res != VK_SUCCESS)
12620  {
12621  return res;
12622  }
12623  }
12624  return VK_SUCCESS;
12625 }
12626 
12627 void VmaBlockVector::AddStats(VmaStats* pStats)
12628 {
12629  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12630  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12631 
12632  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12633 
12634  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12635  {
12636  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12637  VMA_ASSERT(pBlock);
12638  VMA_HEAVY_ASSERT(pBlock->Validate());
12639  VmaStatInfo allocationStatInfo;
12640  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12641  VmaAddStatInfo(pStats->total, allocationStatInfo);
12642  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12643  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12644  }
12645 }
12646 
12647 ////////////////////////////////////////////////////////////////////////////////
12648 // VmaDefragmentationAlgorithm_Generic members definition
12649 
12650 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12651  VmaAllocator hAllocator,
12652  VmaBlockVector* pBlockVector,
12653  uint32_t currentFrameIndex,
12654  bool overlappingMoveSupported) :
12655  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12656  m_AllocationCount(0),
12657  m_AllAllocations(false),
12658  m_BytesMoved(0),
12659  m_AllocationsMoved(0),
12660  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12661 {
12662  // Create block info for each block.
12663  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12664  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12665  {
12666  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12667  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12668  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12669  m_Blocks.push_back(pBlockInfo);
12670  }
12671 
12672  // Sort them by m_pBlock pointer value.
12673  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12674 }
12675 
12676 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12677 {
12678  for(size_t i = m_Blocks.size(); i--; )
12679  {
12680  vma_delete(m_hAllocator, m_Blocks[i]);
12681  }
12682 }
12683 
12684 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12685 {
12686  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation has not been lost.
12687  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12688  {
12689  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12690  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12691  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12692  {
12693  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12694  (*it)->m_Allocations.push_back(allocInfo);
12695  }
12696  else
12697  {
12698  VMA_ASSERT(0);
12699  }
12700 
12701  ++m_AllocationCount;
12702  }
12703 }
12704 
12705 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12706  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12707  VkDeviceSize maxBytesToMove,
12708  uint32_t maxAllocationsToMove)
12709 {
12710  if(m_Blocks.empty())
12711  {
12712  return VK_SUCCESS;
12713  }
12714 
12715  // This is a choice based on research.
12716  // Option 1:
12717  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12718  // Option 2:
12719  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12720  // Option 3:
12721  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12722 
12723  size_t srcBlockMinIndex = 0;
12724  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
12725  /*
12726  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12727  {
12728  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12729  if(blocksWithNonMovableCount > 0)
12730  {
12731  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12732  }
12733  }
12734  */
12735 
12736  size_t srcBlockIndex = m_Blocks.size() - 1;
12737  size_t srcAllocIndex = SIZE_MAX;
12738  for(;;)
12739  {
12740  // 1. Find next allocation to move.
12741  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12742  // 1.2. Then start from last to first m_Allocations.
12743  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12744  {
12745  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12746  {
12747  // Finished: no more allocations to process.
12748  if(srcBlockIndex == srcBlockMinIndex)
12749  {
12750  return VK_SUCCESS;
12751  }
12752  else
12753  {
12754  --srcBlockIndex;
12755  srcAllocIndex = SIZE_MAX;
12756  }
12757  }
12758  else
12759  {
12760  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12761  }
12762  }
12763 
12764  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12765  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12766 
12767  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12768  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12769  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12770  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12771 
12772  // 2. Try to find new place for this allocation in preceding or current block.
12773  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12774  {
12775  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12776  VmaAllocationRequest dstAllocRequest;
12777  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12778  m_CurrentFrameIndex,
12779  m_pBlockVector->GetFrameInUseCount(),
12780  m_pBlockVector->GetBufferImageGranularity(),
12781  size,
12782  alignment,
12783  false, // upperAddress
12784  suballocType,
12785  false, // canMakeOtherLost
12786  strategy,
12787  &dstAllocRequest) &&
12788  MoveMakesSense(
12789  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12790  {
12791  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12792 
12793  // Reached limit on number of allocations or bytes to move.
12794  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12795  (m_BytesMoved + size > maxBytesToMove))
12796  {
12797  return VK_SUCCESS;
12798  }
12799 
12800  VmaDefragmentationMove move;
12801  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12802  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12803  move.srcOffset = srcOffset;
12804  move.dstOffset = dstAllocRequest.offset;
12805  move.size = size;
12806  moves.push_back(move);
12807 
12808  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12809  dstAllocRequest,
12810  suballocType,
12811  size,
12812  allocInfo.m_hAllocation);
12813  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12814 
12815  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12816 
12817  if(allocInfo.m_pChanged != VMA_NULL)
12818  {
12819  *allocInfo.m_pChanged = VK_TRUE;
12820  }
12821 
12822  ++m_AllocationsMoved;
12823  m_BytesMoved += size;
12824 
12825  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12826 
12827  break;
12828  }
12829  }
12830 
12831  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
12832 
12833  if(srcAllocIndex > 0)
12834  {
12835  --srcAllocIndex;
12836  }
12837  else
12838  {
12839  if(srcBlockIndex > 0)
12840  {
12841  --srcBlockIndex;
12842  srcAllocIndex = SIZE_MAX;
12843  }
12844  else
12845  {
12846  return VK_SUCCESS;
12847  }
12848  }
12849  }
12850 }
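// In short: allocations are drained from the most "source" blocks (the back
// of m_Blocks) and re-placed into the most "destination" blocks (the front),
// using the strategy chosen above, until the byte or allocation budget is
// exhausted. MoveMakesSense() below rejects moves that would not bring an
// allocation closer to the front, so successive rounds cannot undo each
// other's progress.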
12851 
12852 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12853 {
12854  size_t result = 0;
12855  for(size_t i = 0; i < m_Blocks.size(); ++i)
12856  {
12857  if(m_Blocks[i]->m_HasNonMovableAllocations)
12858  {
12859  ++result;
12860  }
12861  }
12862  return result;
12863 }
12864 
12865 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12866  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12867  VkDeviceSize maxBytesToMove,
12868  uint32_t maxAllocationsToMove)
12869 {
12870  if(!m_AllAllocations && m_AllocationCount == 0)
12871  {
12872  return VK_SUCCESS;
12873  }
12874 
12875  const size_t blockCount = m_Blocks.size();
12876  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12877  {
12878  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12879 
12880  if(m_AllAllocations)
12881  {
12882  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12883  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12884  it != pMetadata->m_Suballocations.end();
12885  ++it)
12886  {
12887  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12888  {
12889  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12890  pBlockInfo->m_Allocations.push_back(allocInfo);
12891  }
12892  }
12893  }
12894 
12895  pBlockInfo->CalcHasNonMovableAllocations();
12896 
12897  // This is a choice based on research.
12898  // Option 1:
12899  pBlockInfo->SortAllocationsByOffsetDescending();
12900  // Option 2:
12901  //pBlockInfo->SortAllocationsBySizeDescending();
12902  }
12903 
12904  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
12905  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12906 
12907  // This is a choice based on research.
12908  const uint32_t roundCount = 2;
12909 
12910  // Execute defragmentation rounds (the main part).
12911  VkResult result = VK_SUCCESS;
12912  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12913  {
12914  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12915  }
12916 
12917  return result;
12918 }
12919 
12920 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12921  size_t dstBlockIndex, VkDeviceSize dstOffset,
12922  size_t srcBlockIndex, VkDeviceSize srcOffset)
12923 {
12924  if(dstBlockIndex < srcBlockIndex)
12925  {
12926  return true;
12927  }
12928  if(dstBlockIndex > srcBlockIndex)
12929  {
12930  return false;
12931  }
12932  if(dstOffset < srcOffset)
12933  {
12934  return true;
12935  }
12936  return false;
12937 }
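// This defines a strict "toward the front" ordering: a move is accepted only
// into an earlier block, or to a lower offset within the same block. For
// example, moving from (srcBlockIndex = 1, srcOffset = 4096) to
// (dstBlockIndex = 1, dstOffset = 0) is accepted, while the reverse move is
// rejected - which is what prevents allocations from ping-ponging between
// rounds.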
12938 
12939 ////////////////////////////////////////////////////////////////////////////////
12940 // VmaDefragmentationAlgorithm_Fast
12941 
12942 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12943  VmaAllocator hAllocator,
12944  VmaBlockVector* pBlockVector,
12945  uint32_t currentFrameIndex,
12946  bool overlappingMoveSupported) :
12947  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12948  m_OverlappingMoveSupported(overlappingMoveSupported),
12949  m_AllocationCount(0),
12950  m_AllAllocations(false),
12951  m_BytesMoved(0),
12952  m_AllocationsMoved(0),
12953  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12954 {
12955  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12956 
12957 }
12958 
12959 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12960 {
12961 }
12962 
12963 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12964  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12965  VkDeviceSize maxBytesToMove,
12966  uint32_t maxAllocationsToMove)
12967 {
12968  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12969 
12970  const size_t blockCount = m_pBlockVector->GetBlockCount();
12971  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12972  {
12973  return VK_SUCCESS;
12974  }
12975 
12976  PreprocessMetadata();
12977 
12978  // Sort blocks in order from most "destination" (least free space) to most "source" (most free space).
12979 
12980  m_BlockInfos.resize(blockCount);
12981  for(size_t i = 0; i < blockCount; ++i)
12982  {
12983  m_BlockInfos[i].origBlockIndex = i;
12984  }
12985 
12986  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12987  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12988  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12989  });
12990 
12991  // THE MAIN ALGORITHM
12992 
12993  FreeSpaceDatabase freeSpaceDb;
12994 
12995  size_t dstBlockInfoIndex = 0;
12996  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12997  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12998  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12999  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13000  VkDeviceSize dstOffset = 0;
13001 
13002  bool end = false;
13003  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13004  {
13005  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13006  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13007  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13008  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13009  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13010  {
13011  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13012  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13013  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13014  if(m_AllocationsMoved == maxAllocationsToMove ||
13015  m_BytesMoved + srcAllocSize > maxBytesToMove)
13016  {
13017  end = true;
13018  break;
13019  }
13020  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13021 
13022  // Try to place it in one of free spaces from the database.
13023  size_t freeSpaceInfoIndex;
13024  VkDeviceSize dstAllocOffset;
13025  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13026  freeSpaceInfoIndex, dstAllocOffset))
13027  {
13028  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13029  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13030  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13031 
13032  // Same block
13033  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13034  {
13035  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13036 
13037  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13038 
13039  VmaSuballocation suballoc = *srcSuballocIt;
13040  suballoc.offset = dstAllocOffset;
13041  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13042  m_BytesMoved += srcAllocSize;
13043  ++m_AllocationsMoved;
13044 
13045  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13046  ++nextSuballocIt;
13047  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13048  srcSuballocIt = nextSuballocIt;
13049 
13050  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13051 
13052  VmaDefragmentationMove move = {
13053  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13054  srcAllocOffset, dstAllocOffset,
13055  srcAllocSize };
13056  moves.push_back(move);
13057  }
13058  // Different block
13059  else
13060  {
13061  // MOVE OPTION 2: Move the allocation to a different block.
13062 
13063  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13064 
13065  VmaSuballocation suballoc = *srcSuballocIt;
13066  suballoc.offset = dstAllocOffset;
13067  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13068  m_BytesMoved += srcAllocSize;
13069  ++m_AllocationsMoved;
13070 
13071  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13072  ++nextSuballocIt;
13073  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13074  srcSuballocIt = nextSuballocIt;
13075 
13076  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13077 
13078  VmaDefragmentationMove move = {
13079  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13080  srcAllocOffset, dstAllocOffset,
13081  srcAllocSize };
13082  moves.push_back(move);
13083  }
13084  }
13085  else
13086  {
13087  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13088 
13089  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13090  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13091  dstAllocOffset + srcAllocSize > dstBlockSize)
13092  {
13093  // But before that, register remaining free space at the end of dst block.
13094  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13095 
13096  ++dstBlockInfoIndex;
13097  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13098  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13099  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13100  dstBlockSize = pDstMetadata->GetSize();
13101  dstOffset = 0;
13102  dstAllocOffset = 0;
13103  }
13104 
13105  // Same block
13106  if(dstBlockInfoIndex == srcBlockInfoIndex)
13107  {
13108  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13109 
13110  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13111 
13112  bool skipOver = overlap;
13113  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13114  {
13115  // If the destination and source regions overlap, skip the move if it would
13116  // shift the allocation by less than 1/64 of its size.
13117  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13118  }
13119 
13120  if(skipOver)
13121  {
13122  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13123 
13124  dstOffset = srcAllocOffset + srcAllocSize;
13125  ++srcSuballocIt;
13126  }
13127  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13128  else
13129  {
13130  srcSuballocIt->offset = dstAllocOffset;
13131  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13132  dstOffset = dstAllocOffset + srcAllocSize;
13133  m_BytesMoved += srcAllocSize;
13134  ++m_AllocationsMoved;
13135  ++srcSuballocIt;
13136  VmaDefragmentationMove move = {
13137  srcOrigBlockIndex, dstOrigBlockIndex,
13138  srcAllocOffset, dstAllocOffset,
13139  srcAllocSize };
13140  moves.push_back(move);
13141  }
13142  }
13143  // Different block
13144  else
13145  {
13146  // MOVE OPTION 2: Move the allocation to a different block.
13147 
13148  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13149  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13150 
13151  VmaSuballocation suballoc = *srcSuballocIt;
13152  suballoc.offset = dstAllocOffset;
13153  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13154  dstOffset = dstAllocOffset + srcAllocSize;
13155  m_BytesMoved += srcAllocSize;
13156  ++m_AllocationsMoved;
13157 
13158  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13159  ++nextSuballocIt;
13160  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13161  srcSuballocIt = nextSuballocIt;
13162 
13163  pDstMetadata->m_Suballocations.push_back(suballoc);
13164 
13165  VmaDefragmentationMove move = {
13166  srcOrigBlockIndex, dstOrigBlockIndex,
13167  srcAllocOffset, dstAllocOffset,
13168  srcAllocSize };
13169  moves.push_back(move);
13170  }
13171  }
13172  }
13173  }
13174 
13175  m_BlockInfos.clear();
13176 
13177  PostprocessMetadata();
13178 
13179  return VK_SUCCESS;
13180 }
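// The loop above is effectively a two-pointer compaction: blocks are visited
// from most occupied ("destination") to least occupied ("source"), and every
// movable suballocation is slid to the lowest offset where it fits - either
// into an earlier block or leftwards within its own block. Gaps that cannot
// be filled immediately are remembered in freeSpaceDb and reused for later,
// smaller allocations.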
13181 
13182 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13183 {
13184  const size_t blockCount = m_pBlockVector->GetBlockCount();
13185  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13186  {
13187  VmaBlockMetadata_Generic* const pMetadata =
13188  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13189  pMetadata->m_FreeCount = 0;
13190  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13191  pMetadata->m_FreeSuballocationsBySize.clear();
13192  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13193  it != pMetadata->m_Suballocations.end(); )
13194  {
13195  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13196  {
13197  VmaSuballocationList::iterator nextIt = it;
13198  ++nextIt;
13199  pMetadata->m_Suballocations.erase(it);
13200  it = nextIt;
13201  }
13202  else
13203  {
13204  ++it;
13205  }
13206  }
13207  }
13208 }
13209 
13210 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13211 {
13212  const size_t blockCount = m_pBlockVector->GetBlockCount();
13213  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13214  {
13215  VmaBlockMetadata_Generic* const pMetadata =
13216  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13217  const VkDeviceSize blockSize = pMetadata->GetSize();
13218 
13219  // No allocations in this block - entire area is free.
13220  if(pMetadata->m_Suballocations.empty())
13221  {
13222  pMetadata->m_FreeCount = 1;
13223  //pMetadata->m_SumFreeSize is already set to blockSize.
13224  VmaSuballocation suballoc = {
13225  0, // offset
13226  blockSize, // size
13227  VMA_NULL, // hAllocation
13228  VMA_SUBALLOCATION_TYPE_FREE };
13229  pMetadata->m_Suballocations.push_back(suballoc);
13230  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13231  }
13232  // There are some allocations in this block.
13233  else
13234  {
13235  VkDeviceSize offset = 0;
13236  VmaSuballocationList::iterator it;
13237  for(it = pMetadata->m_Suballocations.begin();
13238  it != pMetadata->m_Suballocations.end();
13239  ++it)
13240  {
13241  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13242  VMA_ASSERT(it->offset >= offset);
13243 
13244  // Need to insert preceding free space.
13245  if(it->offset > offset)
13246  {
13247  ++pMetadata->m_FreeCount;
13248  const VkDeviceSize freeSize = it->offset - offset;
13249  VmaSuballocation suballoc = {
13250  offset, // offset
13251  freeSize, // size
13252  VMA_NULL, // hAllocation
13253  VMA_SUBALLOCATION_TYPE_FREE };
13254  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13255  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13256  {
13257  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13258  }
13259  }
13260 
13261  pMetadata->m_SumFreeSize -= it->size;
13262  offset = it->offset + it->size;
13263  }
13264 
13265  // Need to insert trailing free space.
13266  if(offset < blockSize)
13267  {
13268  ++pMetadata->m_FreeCount;
13269  const VkDeviceSize freeSize = blockSize - offset;
13270  VmaSuballocation suballoc = {
13271  offset, // offset
13272  freeSize, // size
13273  VMA_NULL, // hAllocation
13274  VMA_SUBALLOCATION_TYPE_FREE };
13275  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13276  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13277  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13278  {
13279  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13280  }
13281  }
13282 
13283  VMA_SORT(
13284  pMetadata->m_FreeSuballocationsBySize.begin(),
13285  pMetadata->m_FreeSuballocationsBySize.end(),
13286  VmaSuballocationItemSizeLess());
13287  }
13288 
13289  VMA_HEAVY_ASSERT(pMetadata->Validate());
13290  }
13291 }
13292 
13293 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13294 {
13295  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13296  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13297  while(it != pMetadata->m_Suballocations.end() &&
13298  it->offset < suballoc.offset)
13299  {
13300  // Stop at the first element at or past the new offset so the list
13301  // stays sorted by offset after insertion.
13302  ++it;
13303  }
13304  pMetadata->m_Suballocations.insert(it, suballoc);
13305 }
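// The linear search keeps m_Suballocations sorted by ascending offset, the
// invariant the rest of the metadata code assumes; as the TODO notes, the
// caller could remember the insertion point instead of re-scanning the list.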
13306 
13307 ////////////////////////////////////////////////////////////////////////////////
13308 // VmaBlockVectorDefragmentationContext
13309 
13310 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13311  VmaAllocator hAllocator,
13312  VmaPool hCustomPool,
13313  VmaBlockVector* pBlockVector,
13314  uint32_t currFrameIndex) :
13315  res(VK_SUCCESS),
13316  mutexLocked(false),
13317  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13318  m_hAllocator(hAllocator),
13319  m_hCustomPool(hCustomPool),
13320  m_pBlockVector(pBlockVector),
13321  m_CurrFrameIndex(currFrameIndex),
13322  m_pAlgorithm(VMA_NULL),
13323  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13324  m_AllAllocations(false)
13325 {
13326 }
13327 
13328 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13329 {
13330  vma_delete(m_hAllocator, m_pAlgorithm);
13331 }
13332 
13333 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13334 {
13335  AllocInfo info = { hAlloc, pChanged };
13336  m_Allocations.push_back(info);
13337 }
13338 
13339 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13340 {
13341  const bool allAllocations = m_AllAllocations ||
13342  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13343 
13344  /********************************
13345  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13346  ********************************/
13347 
13348  /*
13349  Fast algorithm is supported only when certain criteria are met:
13350  - VMA_DEBUG_MARGIN is 0.
13351  - All allocations in this block vector are moveable.
13352  - There is no possibility of image/buffer granularity conflict.
13353  */
13354  if(VMA_DEBUG_MARGIN == 0 &&
13355  allAllocations &&
13356  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13357  {
13358  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13359  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13360  }
13361  else
13362  {
13363  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13364  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13365  }
13366 
13367  if(allAllocations)
13368  {
13369  m_pAlgorithm->AddAll();
13370  }
13371  else
13372  {
13373  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13374  {
13375  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13376  }
13377  }
13378 }
13379 
13380 ////////////////////////////////////////////////////////////////////////////////
13381 // VmaDefragmentationContext
13382 
13383 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13384  VmaAllocator hAllocator,
13385  uint32_t currFrameIndex,
13386  uint32_t flags,
13387  VmaDefragmentationStats* pStats) :
13388  m_hAllocator(hAllocator),
13389  m_CurrFrameIndex(currFrameIndex),
13390  m_Flags(flags),
13391  m_pStats(pStats),
13392  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13393 {
13394  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13395 }
13396 
13397 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13398 {
13399  for(size_t i = m_CustomPoolContexts.size(); i--; )
13400  {
13401  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13402  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13403  vma_delete(m_hAllocator, pBlockVectorCtx);
13404  }
13405  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13406  {
13407  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13408  if(pBlockVectorCtx)
13409  {
13410  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13411  vma_delete(m_hAllocator, pBlockVectorCtx);
13412  }
13413  }
13414 }
13415 
13416 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13417 {
13418  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13419  {
13420  VmaPool pool = pPools[poolIndex];
13421  VMA_ASSERT(pool);
13422  // Pools with algorithm other than default are not defragmented.
13423  if(pool->m_BlockVector.GetAlgorithm() == 0)
13424  {
13425  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13426 
13427  for(size_t i = m_CustomPoolContexts.size(); i--; )
13428  {
13429  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13430  {
13431  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13432  break;
13433  }
13434  }
13435 
13436  if(!pBlockVectorDefragCtx)
13437  {
13438  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13439  m_hAllocator,
13440  pool,
13441  &pool->m_BlockVector,
13442  m_CurrFrameIndex);
13443  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13444  }
13445 
13446  pBlockVectorDefragCtx->AddAll();
13447  }
13448  }
13449 }
13450 
13451 void VmaDefragmentationContext_T::AddAllocations(
13452  uint32_t allocationCount,
13453  VmaAllocation* pAllocations,
13454  VkBool32* pAllocationsChanged)
13455 {
13456  // Dispatch pAllocations among defragmentators. Create them when necessary.
13457  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13458  {
13459  const VmaAllocation hAlloc = pAllocations[allocIndex];
13460  VMA_ASSERT(hAlloc);
13461  // DedicatedAlloc cannot be defragmented.
13462  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13463  // Lost allocation cannot be defragmented.
13464  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13465  {
13466  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13467 
13468  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13469  // This allocation belongs to custom pool.
13470  if(hAllocPool != VK_NULL_HANDLE)
13471  {
13472  // Pools with algorithm other than default are not defragmented.
13473  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13474  {
13475  for(size_t i = m_CustomPoolContexts.size(); i--; )
13476  {
13477  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13478  {
13479  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13480  break;
13481  }
13482  }
13483  if(!pBlockVectorDefragCtx)
13484  {
13485  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13486  m_hAllocator,
13487  hAllocPool,
13488  &hAllocPool->m_BlockVector,
13489  m_CurrFrameIndex);
13490  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13491  }
13492  }
13493  }
13494  // This allocation belongs to default pool.
13495  else
13496  {
13497  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13498  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13499  if(!pBlockVectorDefragCtx)
13500  {
13501  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13502  m_hAllocator,
13503  VMA_NULL, // hCustomPool
13504  m_hAllocator->m_pBlockVectors[memTypeIndex],
13505  m_CurrFrameIndex);
13506  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13507  }
13508  }
13509 
13510  if(pBlockVectorDefragCtx)
13511  {
13512  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13513  &pAllocationsChanged[allocIndex] : VMA_NULL;
13514  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13515  }
13516  }
13517  }
13518 }
13519 
13520 VkResult VmaDefragmentationContext_T::Defragment(
13521  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13522  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13523  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13524 {
13525  if(pStats)
13526  {
13527  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13528  }
13529 
13530  if(commandBuffer == VK_NULL_HANDLE)
13531  {
13532  maxGpuBytesToMove = 0;
13533  maxGpuAllocationsToMove = 0;
13534  }
13535 
13536  VkResult res = VK_SUCCESS;
13537 
13538  // Process default pools.
13539  for(uint32_t memTypeIndex = 0;
13540  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13541  ++memTypeIndex)
13542  {
13543  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13544  if(pBlockVectorCtx)
13545  {
13546  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13547  pBlockVectorCtx->GetBlockVector()->Defragment(
13548  pBlockVectorCtx,
13549  pStats,
13550  maxCpuBytesToMove, maxCpuAllocationsToMove,
13551  maxGpuBytesToMove, maxGpuAllocationsToMove,
13552  commandBuffer);
13553  if(pBlockVectorCtx->res != VK_SUCCESS)
13554  {
13555  res = pBlockVectorCtx->res;
13556  }
13557  }
13558  }
13559 
13560  // Process custom pools.
13561  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13562  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13563  ++customCtxIndex)
13564  {
13565  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13566  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13567  pBlockVectorCtx->GetBlockVector()->Defragment(
13568  pBlockVectorCtx,
13569  pStats,
13570  maxCpuBytesToMove, maxCpuAllocationsToMove,
13571  maxGpuBytesToMove, maxGpuAllocationsToMove,
13572  commandBuffer);
13573  if(pBlockVectorCtx->res != VK_SUCCESS)
13574  {
13575  res = pBlockVectorCtx->res;
13576  }
13577  }
13578 
13579  return res;
13580 }
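/*
Illustrative sketch, not part of the library: this Defragment() method is normally
driven through the public vmaDefragmentationBegin()/vmaDefragmentationEnd() pair.
The `allocator` handle and the `allocations` vector below are assumed to be set up
by the caller.

    std::vector<VmaAllocation> allocations; // Filled with allocations to defragment.
    std::vector<VkBool32> allocationsChanged(allocations.size());

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocations.size();
    defragInfo.pAllocations = allocations.data();
    defragInfo.pAllocationsChanged = allocationsChanged.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // No CPU byte limit.
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No CPU move-count limit.
    // Leaving commandBuffer null disables GPU-side moves, as handled above.

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/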
13581 
13582 ////////////////////////////////////////////////////////////////////////////////
13583 // VmaRecorder
13584 
13585 #if VMA_RECORDING_ENABLED
13586 
13587 VmaRecorder::VmaRecorder() :
13588  m_UseMutex(true),
13589  m_Flags(0),
13590  m_File(VMA_NULL),
13591  m_Freq(INT64_MAX),
13592  m_StartCounter(INT64_MAX)
13593 {
13594 }
13595 
13596 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13597 {
13598  m_UseMutex = useMutex;
13599  m_Flags = settings.flags;
13600 
13601  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13602  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13603 
13604  // Open file for writing.
13605  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13606  if(err != 0)
13607  {
13608  return VK_ERROR_INITIALIZATION_FAILED;
13609  }
13610 
13611  // Write header.
13612  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13613  fprintf(m_File, "%s\n", "1,5");
13614 
13615  return VK_SUCCESS;
13616 }
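/*
Illustrative sketch, not part of the library: recording is enabled by pointing
VmaAllocatorCreateInfo::pRecordSettings at a VmaRecordSettings struct before
vmaCreateAllocator(). Init() above then opens the CSV file and writes the two
header lines ("Vulkan Memory Allocator,Calls recording" and the format version
"1,5"). The `allocatorInfo` below is assumed to be otherwise filled in.

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Flush after each call.
    recordSettings.pFilePath = "vma_calls.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... physicalDevice, device, etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;
*/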
13617 
13618 VmaRecorder::~VmaRecorder()
13619 {
13620  if(m_File != VMA_NULL)
13621  {
13622  fclose(m_File);
13623  }
13624 }
13625 
13626 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13627 {
13628  CallParams callParams;
13629  GetBasicParams(callParams);
13630 
13631  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13632  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13633  Flush();
13634 }
13635 
13636 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13637 {
13638  CallParams callParams;
13639  GetBasicParams(callParams);
13640 
13641  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13642  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13643  Flush();
13644 }
13645 
13646 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13647 {
13648  CallParams callParams;
13649  GetBasicParams(callParams);
13650 
13651  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13652  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13653  createInfo.memoryTypeIndex,
13654  createInfo.flags,
13655  createInfo.blockSize,
13656  (uint64_t)createInfo.minBlockCount,
13657  (uint64_t)createInfo.maxBlockCount,
13658  createInfo.frameInUseCount,
13659  pool);
13660  Flush();
13661 }
13662 
13663 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13664 {
13665  CallParams callParams;
13666  GetBasicParams(callParams);
13667 
13668  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13669  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13670  pool);
13671  Flush();
13672 }
13673 
13674 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13675  const VkMemoryRequirements& vkMemReq,
13676  const VmaAllocationCreateInfo& createInfo,
13677  VmaAllocation allocation)
13678 {
13679  CallParams callParams;
13680  GetBasicParams(callParams);
13681 
13682  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13683  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13684  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13685  vkMemReq.size,
13686  vkMemReq.alignment,
13687  vkMemReq.memoryTypeBits,
13688  createInfo.flags,
13689  createInfo.usage,
13690  createInfo.requiredFlags,
13691  createInfo.preferredFlags,
13692  createInfo.memoryTypeBits,
13693  createInfo.pool,
13694  allocation,
13695  userDataStr.GetString());
13696  Flush();
13697 }
13698 
13699 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13700  const VkMemoryRequirements& vkMemReq,
13701  const VmaAllocationCreateInfo& createInfo,
13702  uint64_t allocationCount,
13703  const VmaAllocation* pAllocations)
13704 {
13705  CallParams callParams;
13706  GetBasicParams(callParams);
13707 
13708  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13709  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13710  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13711  vkMemReq.size,
13712  vkMemReq.alignment,
13713  vkMemReq.memoryTypeBits,
13714  createInfo.flags,
13715  createInfo.usage,
13716  createInfo.requiredFlags,
13717  createInfo.preferredFlags,
13718  createInfo.memoryTypeBits,
13719  createInfo.pool);
13720  PrintPointerList(allocationCount, pAllocations);
13721  fprintf(m_File, ",%s\n", userDataStr.GetString());
13722  Flush();
13723 }
13724 
13725 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13726  const VkMemoryRequirements& vkMemReq,
13727  bool requiresDedicatedAllocation,
13728  bool prefersDedicatedAllocation,
13729  const VmaAllocationCreateInfo& createInfo,
13730  VmaAllocation allocation)
13731 {
13732  CallParams callParams;
13733  GetBasicParams(callParams);
13734 
13735  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13736  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13737  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13738  vkMemReq.size,
13739  vkMemReq.alignment,
13740  vkMemReq.memoryTypeBits,
13741  requiresDedicatedAllocation ? 1 : 0,
13742  prefersDedicatedAllocation ? 1 : 0,
13743  createInfo.flags,
13744  createInfo.usage,
13745  createInfo.requiredFlags,
13746  createInfo.preferredFlags,
13747  createInfo.memoryTypeBits,
13748  createInfo.pool,
13749  allocation,
13750  userDataStr.GetString());
13751  Flush();
13752 }
13753 
13754 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13755  const VkMemoryRequirements& vkMemReq,
13756  bool requiresDedicatedAllocation,
13757  bool prefersDedicatedAllocation,
13758  const VmaAllocationCreateInfo& createInfo,
13759  VmaAllocation allocation)
13760 {
13761  CallParams callParams;
13762  GetBasicParams(callParams);
13763 
13764  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13765  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13766  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13767  vkMemReq.size,
13768  vkMemReq.alignment,
13769  vkMemReq.memoryTypeBits,
13770  requiresDedicatedAllocation ? 1 : 0,
13771  prefersDedicatedAllocation ? 1 : 0,
13772  createInfo.flags,
13773  createInfo.usage,
13774  createInfo.requiredFlags,
13775  createInfo.preferredFlags,
13776  createInfo.memoryTypeBits,
13777  createInfo.pool,
13778  allocation,
13779  userDataStr.GetString());
13780  Flush();
13781 }
13782 
13783 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13784  VmaAllocation allocation)
13785 {
13786  CallParams callParams;
13787  GetBasicParams(callParams);
13788 
13789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13790  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13791  allocation);
13792  Flush();
13793 }
13794 
13795 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13796  uint64_t allocationCount,
13797  const VmaAllocation* pAllocations)
13798 {
13799  CallParams callParams;
13800  GetBasicParams(callParams);
13801 
13802  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13803  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13804  PrintPointerList(allocationCount, pAllocations);
13805  fprintf(m_File, "\n");
13806  Flush();
13807 }
13808 
13809 void VmaRecorder::RecordResizeAllocation(
13810  uint32_t frameIndex,
13811  VmaAllocation allocation,
13812  VkDeviceSize newSize)
13813 {
13814  CallParams callParams;
13815  GetBasicParams(callParams);
13816 
13817  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13818  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13819  allocation, newSize);
13820  Flush();
13821 }
13822 
13823 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13824  VmaAllocation allocation,
13825  const void* pUserData)
13826 {
13827  CallParams callParams;
13828  GetBasicParams(callParams);
13829 
13830  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13831  UserDataString userDataStr(
13832  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13833  pUserData);
13834  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13835  allocation,
13836  userDataStr.GetString());
13837  Flush();
13838 }
13839 
13840 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13841  VmaAllocation allocation)
13842 {
13843  CallParams callParams;
13844  GetBasicParams(callParams);
13845 
13846  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13847  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13848  allocation);
13849  Flush();
13850 }
13851 
13852 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13853  VmaAllocation allocation)
13854 {
13855  CallParams callParams;
13856  GetBasicParams(callParams);
13857 
13858  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13859  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13860  allocation);
13861  Flush();
13862 }
13863 
13864 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13865  VmaAllocation allocation)
13866 {
13867  CallParams callParams;
13868  GetBasicParams(callParams);
13869 
13870  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13871  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13872  allocation);
13873  Flush();
13874 }
13875 
13876 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13877  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13878 {
13879  CallParams callParams;
13880  GetBasicParams(callParams);
13881 
13882  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13883  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13884  allocation,
13885  offset,
13886  size);
13887  Flush();
13888 }
13889 
13890 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13891  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13892 {
13893  CallParams callParams;
13894  GetBasicParams(callParams);
13895 
13896  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13897  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13898  allocation,
13899  offset,
13900  size);
13901  Flush();
13902 }
13903 
13904 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13905  const VkBufferCreateInfo& bufCreateInfo,
13906  const VmaAllocationCreateInfo& allocCreateInfo,
13907  VmaAllocation allocation)
13908 {
13909  CallParams callParams;
13910  GetBasicParams(callParams);
13911 
13912  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13913  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13914  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13915  bufCreateInfo.flags,
13916  bufCreateInfo.size,
13917  bufCreateInfo.usage,
13918  bufCreateInfo.sharingMode,
13919  allocCreateInfo.flags,
13920  allocCreateInfo.usage,
13921  allocCreateInfo.requiredFlags,
13922  allocCreateInfo.preferredFlags,
13923  allocCreateInfo.memoryTypeBits,
13924  allocCreateInfo.pool,
13925  allocation,
13926  userDataStr.GetString());
13927  Flush();
13928 }
13929 
13930 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13931  const VkImageCreateInfo& imageCreateInfo,
13932  const VmaAllocationCreateInfo& allocCreateInfo,
13933  VmaAllocation allocation)
13934 {
13935  CallParams callParams;
13936  GetBasicParams(callParams);
13937 
13938  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13939  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13940  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13941  imageCreateInfo.flags,
13942  imageCreateInfo.imageType,
13943  imageCreateInfo.format,
13944  imageCreateInfo.extent.width,
13945  imageCreateInfo.extent.height,
13946  imageCreateInfo.extent.depth,
13947  imageCreateInfo.mipLevels,
13948  imageCreateInfo.arrayLayers,
13949  imageCreateInfo.samples,
13950  imageCreateInfo.tiling,
13951  imageCreateInfo.usage,
13952  imageCreateInfo.sharingMode,
13953  imageCreateInfo.initialLayout,
13954  allocCreateInfo.flags,
13955  allocCreateInfo.usage,
13956  allocCreateInfo.requiredFlags,
13957  allocCreateInfo.preferredFlags,
13958  allocCreateInfo.memoryTypeBits,
13959  allocCreateInfo.pool,
13960  allocation,
13961  userDataStr.GetString());
13962  Flush();
13963 }
13964 
13965 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13966  VmaAllocation allocation)
13967 {
13968  CallParams callParams;
13969  GetBasicParams(callParams);
13970 
13971  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13972  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13973  allocation);
13974  Flush();
13975 }
13976 
13977 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13978  VmaAllocation allocation)
13979 {
13980  CallParams callParams;
13981  GetBasicParams(callParams);
13982 
13983  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13984  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13985  allocation);
13986  Flush();
13987 }
13988 
13989 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13990  VmaAllocation allocation)
13991 {
13992  CallParams callParams;
13993  GetBasicParams(callParams);
13994 
13995  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13996  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13997  allocation);
13998  Flush();
13999 }
14000 
14001 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14002  VmaAllocation allocation)
14003 {
14004  CallParams callParams;
14005  GetBasicParams(callParams);
14006 
14007  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14008  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14009  allocation);
14010  Flush();
14011 }
14012 
14013 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14014  VmaPool pool)
14015 {
14016  CallParams callParams;
14017  GetBasicParams(callParams);
14018 
14019  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14020  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14021  pool);
14022  Flush();
14023 }
14024 
14025 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14026  const VmaDefragmentationInfo2& info,
14027  VmaDefragmentationContext ctx)
14028 {
14029  CallParams callParams;
14030  GetBasicParams(callParams);
14031 
14032  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14033  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14034  info.flags);
14035  PrintPointerList(info.allocationCount, info.pAllocations);
14036  fprintf(m_File, ",");
14037  PrintPointerList(info.poolCount, info.pPools);
14038  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14039  info.maxCpuBytesToMove,
14040  info.maxCpuAllocationsToMove,
14041  info.maxGpuBytesToMove,
14042  info.maxGpuAllocationsToMove,
14043  info.commandBuffer,
14044  ctx);
14045  Flush();
14046 }
14047 
14048 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14049  VmaDefragmentationContext ctx)
14050 {
14051  CallParams callParams;
14052  GetBasicParams(callParams);
14053 
14054  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14055  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14056  ctx);
14057  Flush();
14058 }
14059 
14060 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14061 {
14062  if(pUserData != VMA_NULL)
14063  {
14064  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14065  {
14066  m_Str = (const char*)pUserData;
14067  }
14068  else
14069  {
14070  sprintf_s(m_PtrStr, "%p", pUserData);
14071  m_Str = m_PtrStr;
14072  }
14073  }
14074  else
14075  {
14076  m_Str = "";
14077  }
14078 }
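/*
Illustrative sketch, not part of the library: whether UserDataString treats
pUserData as printable text or as an opaque pointer is controlled by the
allocation flag, e.g.:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    allocCreateInfo.pUserData = (void*)"MyVertexBuffer"; // Recorded as text, not as %p.
*/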
14079 
14080 void VmaRecorder::WriteConfiguration(
14081  const VkPhysicalDeviceProperties& devProps,
14082  const VkPhysicalDeviceMemoryProperties& memProps,
14083  bool dedicatedAllocationExtensionEnabled)
14084 {
14085  fprintf(m_File, "Config,Begin\n");
14086 
14087  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14088  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14089  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14090  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14091  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14092  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14093 
14094  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14095  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14096  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14097 
14098  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14099  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14100  {
14101  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14102  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14103  }
14104  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14105  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14106  {
14107  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14108  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14109  }
14110 
14111  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14112 
14113  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14114  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14115  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14116  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14117  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14118  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14119  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14120  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14121  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14122 
14123  fprintf(m_File, "Config,End\n");
14124 }
14125 
14126 void VmaRecorder::GetBasicParams(CallParams& outParams)
14127 {
14128  outParams.threadId = GetCurrentThreadId();
14129 
14130  LARGE_INTEGER counter;
14131  QueryPerformanceCounter(&counter);
14132  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14133 }
14134 
14135 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14136 {
14137  if(count)
14138  {
14139  fprintf(m_File, "%p", pItems[0]);
14140  for(uint64_t i = 1; i < count; ++i)
14141  {
14142  fprintf(m_File, " %p", pItems[i]);
14143  }
14144  }
14145 }
14146 
14147 void VmaRecorder::Flush()
14148 {
14149  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14150  {
14151  fflush(m_File);
14152  }
14153 }
14154 
14155 #endif // #if VMA_RECORDING_ENABLED
14156 
14157 ////////////////////////////////////////////////////////////////////////////////
14158 // VmaAllocationObjectAllocator
14159 
14160 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14161  m_Allocator(pAllocationCallbacks, 1024)
14162 {
14163 }
14164 
14165 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14166 {
14167  VmaMutexLock mutexLock(m_Mutex);
14168  return m_Allocator.Alloc();
14169 }
14170 
14171 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14172 {
14173  VmaMutexLock mutexLock(m_Mutex);
14174  m_Allocator.Free(hAlloc);
14175 }
14176 
14177 ////////////////////////////////////////////////////////////////////////////////
14178 // VmaAllocator_T
14179 
14180 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14181  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14182  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14183  m_hDevice(pCreateInfo->device),
14184  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14185  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14186  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14187  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14188  m_PreferredLargeHeapBlockSize(0),
14189  m_PhysicalDevice(pCreateInfo->physicalDevice),
14190  m_CurrentFrameIndex(0),
14191  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14192  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14193  m_NextPoolId(0)
14194 #if VMA_RECORDING_ENABLED
14195  ,m_pRecorder(VMA_NULL)
14196 #endif
14197 {
14198  if(VMA_DEBUG_DETECT_CORRUPTION)
14199  {
14200  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14201  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14202  }
14203 
14204  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14205 
14206 #if !(VMA_DEDICATED_ALLOCATION)
14207  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14208  {
14209  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14210  }
14211 #endif
14212 
14213  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14214  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14215  memset(&m_MemProps, 0, sizeof(m_MemProps));
14216 
14217  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14218  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14219 
14220  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14221  {
14222  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14223  }
14224 
14225  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14226  {
14227  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14228  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14229  }
14230 
14231  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14232 
14233  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14234  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14235 
14236  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14237  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14238  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14239  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14240 
14241  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14242  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14243 
14244  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14245  {
14246  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14247  {
14248  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14249  if(limit != VK_WHOLE_SIZE)
14250  {
14251  m_HeapSizeLimit[heapIndex] = limit;
14252  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14253  {
14254  m_MemProps.memoryHeaps[heapIndex].size = limit;
14255  }
14256  }
14257  }
14258  }
14259 
14260  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14261  {
14262  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14263 
14264  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14265  this,
14266  VK_NULL_HANDLE, // hParentPool
14267  memTypeIndex,
14268  preferredBlockSize,
14269  0,
14270  SIZE_MAX,
14271  GetBufferImageGranularity(),
14272  pCreateInfo->frameInUseCount,
14273  false, // isCustomPool
14274  false, // explicitBlockSize
14275  false); // linearAlgorithm
14276  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14277  // because minBlockCount is 0.
14278  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14279 
14280  }
14281 }
14282 
14283 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14284 {
14285  VkResult res = VK_SUCCESS;
14286 
14287  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14288  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14289  {
14290 #if VMA_RECORDING_ENABLED
14291  m_pRecorder = vma_new(this, VmaRecorder)();
14292  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14293  if(res != VK_SUCCESS)
14294  {
14295  return res;
14296  }
14297  m_pRecorder->WriteConfiguration(
14298  m_PhysicalDeviceProperties,
14299  m_MemProps,
14300  m_UseKhrDedicatedAllocation);
14301  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14302 #else
14303  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14304  return VK_ERROR_FEATURE_NOT_PRESENT;
14305 #endif
14306  }
14307 
14308  return res;
14309 }
14310 
14311 VmaAllocator_T::~VmaAllocator_T()
14312 {
14313 #if VMA_RECORDING_ENABLED
14314  if(m_pRecorder != VMA_NULL)
14315  {
14316  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14317  vma_delete(this, m_pRecorder);
14318  }
14319 #endif
14320 
14321  VMA_ASSERT(m_Pools.empty());
14322 
14323  for(size_t i = GetMemoryTypeCount(); i--; )
14324  {
14325  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14326  {
14327  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14328  }
14329 
14330  vma_delete(this, m_pDedicatedAllocations[i]);
14331  vma_delete(this, m_pBlockVectors[i]);
14332  }
14333 }
14334 
14335 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14336 {
14337 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14338  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14339  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14340  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14341  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14342  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14343  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14344  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14345  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14346  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14347  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14348  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14349  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14350  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14351  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14352  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14353  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14354  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14355 #if VMA_DEDICATED_ALLOCATION
14356  if(m_UseKhrDedicatedAllocation)
14357  {
14358  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14359  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14360  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14361  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14362  }
14363 #endif // #if VMA_DEDICATED_ALLOCATION
14364 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14365 
14366 #define VMA_COPY_IF_NOT_NULL(funcName) \
14367  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14368 
14369  if(pVulkanFunctions != VMA_NULL)
14370  {
14371  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14372  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14373  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14374  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14375  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14376  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14377  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14378  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14379  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14380  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14381  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14382  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14383  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14384  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14385  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14386  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14387  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14388 #if VMA_DEDICATED_ALLOCATION
14389  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14390  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14391 #endif
14392  }
14393 
14394 #undef VMA_COPY_IF_NOT_NULL
14395 
14396  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14397  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14398  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14399  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14400  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14401  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14402  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14403  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14404  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14405  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14406  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14407  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14408  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14409  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14410  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14411  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14412  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14413  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14414  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14415 #if VMA_DEDICATED_ALLOCATION
14416  if(m_UseKhrDedicatedAllocation)
14417  {
14418  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14419  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14420  }
14421 #endif
14422 }
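/*
Illustrative sketch, not part of the library: when VMA_STATIC_VULKAN_FUNCTIONS is
defined to 0, the application must supply every entry point checked by the asserts
above through VmaAllocatorCreateInfo::pVulkanFunctions, e.g.:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    // ... and so on for every member of VmaVulkanFunctions ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/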
14423 
14424 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14425 {
14426  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14427  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14428  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14429  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14430 }
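/*
Worked example, assuming the library's default macro values (VMA_SMALL_HEAP_MAX_SIZE
of 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB): a 512 MiB heap counts as
small, so its preferred block size is 512 MiB / 8 = 64 MiB; an 8 GiB heap is large,
so the 256 MiB default (or a nonzero preferredLargeHeapBlockSize passed by the user)
is used instead.
*/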
14431 
14432 VkResult VmaAllocator_T::AllocateMemoryOfType(
14433  VkDeviceSize size,
14434  VkDeviceSize alignment,
14435  bool dedicatedAllocation,
14436  VkBuffer dedicatedBuffer,
14437  VkImage dedicatedImage,
14438  const VmaAllocationCreateInfo& createInfo,
14439  uint32_t memTypeIndex,
14440  VmaSuballocationType suballocType,
14441  size_t allocationCount,
14442  VmaAllocation* pAllocations)
14443 {
14444  VMA_ASSERT(pAllocations != VMA_NULL);
14445  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14446 
14447  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14448 
14449  // If memory type is not HOST_VISIBLE, disable MAPPED.
14450  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14451  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14452  {
14453  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14454  }
14455 
14456  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14457  VMA_ASSERT(blockVector);
14458 
14459  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14460  bool preferDedicatedMemory =
14461  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14462  dedicatedAllocation ||
14463  // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
14464  size > preferredBlockSize / 2;
14465 
14466  if(preferDedicatedMemory &&
14467  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14468  finalCreateInfo.pool == VK_NULL_HANDLE)
14469  {
14470  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14471  }
14472 
14473  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14474  {
14475  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14476  {
14477  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14478  }
14479  else
14480  {
14481  return AllocateDedicatedMemory(
14482  size,
14483  suballocType,
14484  memTypeIndex,
14485  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14486  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14487  finalCreateInfo.pUserData,
14488  dedicatedBuffer,
14489  dedicatedImage,
14490  allocationCount,
14491  pAllocations);
14492  }
14493  }
14494  else
14495  {
14496  VkResult res = blockVector->Allocate(
14497  m_CurrentFrameIndex.load(),
14498  size,
14499  alignment,
14500  finalCreateInfo,
14501  suballocType,
14502  allocationCount,
14503  pAllocations);
14504  if(res == VK_SUCCESS)
14505  {
14506  return res;
14507  }
14508 
14509  // Block allocation failed: try dedicated memory.
14510  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14511  {
14512  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14513  }
14514  else
14515  {
14516  res = AllocateDedicatedMemory(
14517  size,
14518  suballocType,
14519  memTypeIndex,
14520  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14521  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14522  finalCreateInfo.pUserData,
14523  dedicatedBuffer,
14524  dedicatedImage,
14525  allocationCount,
14526  pAllocations);
14527  if(res == VK_SUCCESS)
14528  {
14529  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14530  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14531  return VK_SUCCESS;
14532  }
14533  else
14534  {
14535  // Everything failed: Return error code.
14536  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14537  return res;
14538  }
14539  }
14540  }
14541 }
14542 
14543 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14544  VkDeviceSize size,
14545  VmaSuballocationType suballocType,
14546  uint32_t memTypeIndex,
14547  bool map,
14548  bool isUserDataString,
14549  void* pUserData,
14550  VkBuffer dedicatedBuffer,
14551  VkImage dedicatedImage,
14552  size_t allocationCount,
14553  VmaAllocation* pAllocations)
14554 {
14555  VMA_ASSERT(allocationCount > 0 && pAllocations);
14556 
14557  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14558  allocInfo.memoryTypeIndex = memTypeIndex;
14559  allocInfo.allocationSize = size;
14560 
14561 #if VMA_DEDICATED_ALLOCATION
14562  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14563  if(m_UseKhrDedicatedAllocation)
14564  {
14565  if(dedicatedBuffer != VK_NULL_HANDLE)
14566  {
14567  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14568  dedicatedAllocInfo.buffer = dedicatedBuffer;
14569  allocInfo.pNext = &dedicatedAllocInfo;
14570  }
14571  else if(dedicatedImage != VK_NULL_HANDLE)
14572  {
14573  dedicatedAllocInfo.image = dedicatedImage;
14574  allocInfo.pNext = &dedicatedAllocInfo;
14575  }
14576  }
14577 #endif // #if VMA_DEDICATED_ALLOCATION
14578 
14579  size_t allocIndex;
14580  VkResult res = VK_SUCCESS;
14581  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14582  {
14583  res = AllocateDedicatedMemoryPage(
14584  size,
14585  suballocType,
14586  memTypeIndex,
14587  allocInfo,
14588  map,
14589  isUserDataString,
14590  pUserData,
14591  pAllocations + allocIndex);
14592  if(res != VK_SUCCESS)
14593  {
14594  break;
14595  }
14596  }
14597 
14598  if(res == VK_SUCCESS)
14599  {
14600  // Register them in m_pDedicatedAllocations.
14601  {
14602  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14603  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14604  VMA_ASSERT(pDedicatedAllocations);
14605  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14606  {
14607  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14608  }
14609  }
14610 
14611  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14612  }
14613  else
14614  {
14615  // Free all already created allocations.
14616  while(allocIndex--)
14617  {
14618  VmaAllocation currAlloc = pAllocations[allocIndex];
14619  VkDeviceMemory hMemory = currAlloc->GetMemory();
14620 
14621  /*
14622  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14623  before vkFreeMemory.
14624 
14625  if(currAlloc->GetMappedData() != VMA_NULL)
14626  {
14627  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14628  }
14629  */
14630 
14631  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14632 
14633  currAlloc->SetUserData(this, VMA_NULL);
14634  currAlloc->Dtor();
14635  m_AllocationObjectAllocator.Free(currAlloc);
14636  }
14637 
14638  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14639  }
14640 
14641  return res;
14642 }
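/*
Illustrative sketch, not part of the library: the all-or-nothing rollback above is
what gives vmaAllocateMemoryPages() its documented contract, either every requested
allocation succeeds or none is left behind. The `allocator` handle and `memReq`
struct below are assumed to come from the caller.

    const size_t count = 8;
    VmaAllocation allocs[count];
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo, count, allocs, VMA_NULL);
    // On failure, no element of allocs needs to be freed.
*/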
14643 
14644 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14645  VkDeviceSize size,
14646  VmaSuballocationType suballocType,
14647  uint32_t memTypeIndex,
14648  const VkMemoryAllocateInfo& allocInfo,
14649  bool map,
14650  bool isUserDataString,
14651  void* pUserData,
14652  VmaAllocation* pAllocation)
14653 {
14654  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14655  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14656  if(res < 0)
14657  {
14658  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14659  return res;
14660  }
14661 
14662  void* pMappedData = VMA_NULL;
14663  if(map)
14664  {
14665  res = (*m_VulkanFunctions.vkMapMemory)(
14666  m_hDevice,
14667  hMemory,
14668  0,
14669  VK_WHOLE_SIZE,
14670  0,
14671  &pMappedData);
14672  if(res < 0)
14673  {
14674  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14675  FreeVulkanMemory(memTypeIndex, size, hMemory);
14676  return res;
14677  }
14678  }
14679 
14680  *pAllocation = m_AllocationObjectAllocator.Allocate();
14681  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14682  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14683  (*pAllocation)->SetUserData(this, pUserData);
14684  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14685  {
14686  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14687  }
14688 
14689  return VK_SUCCESS;
14690 }
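/*
Illustrative sketch, not part of the library: the `map` path above backs
persistently mapped allocations requested with VMA_ALLOCATION_CREATE_MAPPED_BIT.
Assumes `allocator`, `bufCreateInfo`, `srcData` and `dataSize` exist in the caller.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    memcpy(allocInfo.pMappedData, srcData, dataSize); // Mapped for the allocation's lifetime.
*/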
14691 
14692 void VmaAllocator_T::GetBufferMemoryRequirements(
14693  VkBuffer hBuffer,
14694  VkMemoryRequirements& memReq,
14695  bool& requiresDedicatedAllocation,
14696  bool& prefersDedicatedAllocation) const
14697 {
14698 #if VMA_DEDICATED_ALLOCATION
14699  if(m_UseKhrDedicatedAllocation)
14700  {
14701  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14702  memReqInfo.buffer = hBuffer;
14703 
14704  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14705 
14706  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14707  memReq2.pNext = &memDedicatedReq;
14708 
14709  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14710 
14711  memReq = memReq2.memoryRequirements;
14712  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14713  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14714  }
14715  else
14716 #endif // #if VMA_DEDICATED_ALLOCATION
14717  {
14718  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14719  requiresDedicatedAllocation = false;
14720  prefersDedicatedAllocation = false;
14721  }
14722 }
14723 
14724 void VmaAllocator_T::GetImageMemoryRequirements(
14725  VkImage hImage,
14726  VkMemoryRequirements& memReq,
14727  bool& requiresDedicatedAllocation,
14728  bool& prefersDedicatedAllocation) const
14729 {
14730 #if VMA_DEDICATED_ALLOCATION
14731  if(m_UseKhrDedicatedAllocation)
14732  {
14733  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14734  memReqInfo.image = hImage;
14735 
14736  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14737 
14738  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14739  memReq2.pNext = &memDedicatedReq;
14740 
14741  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14742 
14743  memReq = memReq2.memoryRequirements;
14744  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14745  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14746  }
14747  else
14748 #endif // #if VMA_DEDICATED_ALLOCATION
14749  {
14750  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14751  requiresDedicatedAllocation = false;
14752  prefersDedicatedAllocation = false;
14753  }
14754 }
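/*
Illustrative sketch, not part of the library: the *2KHR query paths above are only
taken when the allocator was created with the dedicated-allocation flag, which in
turn requires the application to enable the VK_KHR_get_memory_requirements2 and
VK_KHR_dedicated_allocation device extensions at device creation:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
*/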
14755 
14756 VkResult VmaAllocator_T::AllocateMemory(
14757  const VkMemoryRequirements& vkMemReq,
14758  bool requiresDedicatedAllocation,
14759  bool prefersDedicatedAllocation,
14760  VkBuffer dedicatedBuffer,
14761  VkImage dedicatedImage,
14762  const VmaAllocationCreateInfo& createInfo,
14763  VmaSuballocationType suballocType,
14764  size_t allocationCount,
14765  VmaAllocation* pAllocations)
14766 {
14767  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14768 
14769  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14770 
14771  if(vkMemReq.size == 0)
14772  {
14773  return VK_ERROR_VALIDATION_FAILED_EXT;
14774  }
14775  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14776  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14777  {
14778  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14780  }
14781  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14782  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14783  {
14784  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14785  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14786  }
14787  if(requiresDedicatedAllocation)
14788  {
14789  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14790  {
14791  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14792  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14793  }
14794  if(createInfo.pool != VK_NULL_HANDLE)
14795  {
14796  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14797  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14798  }
14799  }
14800  if((createInfo.pool != VK_NULL_HANDLE) &&
14801  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14802  {
14803  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14804  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14805  }
14806 
14807  if(createInfo.pool != VK_NULL_HANDLE)
14808  {
14809  const VkDeviceSize alignmentForPool = VMA_MAX(
14810  vkMemReq.alignment,
14811  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14812 
14813  VmaAllocationCreateInfo createInfoForPool = createInfo;
14814  // If memory type is not HOST_VISIBLE, disable MAPPED.
14815  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14816  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14817  {
14818  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14819  }
14820 
14821  return createInfo.pool->m_BlockVector.Allocate(
14822  m_CurrentFrameIndex.load(),
14823  vkMemReq.size,
14824  alignmentForPool,
14825  createInfoForPool,
14826  suballocType,
14827  allocationCount,
14828  pAllocations);
14829  }
14830  else
14831  {
14832  // Bit mask of Vulkan memory types acceptable for this allocation.
14833  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14834  uint32_t memTypeIndex = UINT32_MAX;
14835  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14836  if(res == VK_SUCCESS)
14837  {
14838  VkDeviceSize alignmentForMemType = VMA_MAX(
14839  vkMemReq.alignment,
14840  GetMemoryTypeMinAlignment(memTypeIndex));
14841 
14842  res = AllocateMemoryOfType(
14843  vkMemReq.size,
14844  alignmentForMemType,
14845  requiresDedicatedAllocation || prefersDedicatedAllocation,
14846  dedicatedBuffer,
14847  dedicatedImage,
14848  createInfo,
14849  memTypeIndex,
14850  suballocType,
14851  allocationCount,
14852  pAllocations);
14853  // Succeeded on first try.
14854  if(res == VK_SUCCESS)
14855  {
14856  return res;
14857  }
14858  // Allocation from this memory type failed. Try other compatible memory types.
14859  else
14860  {
14861  for(;;)
14862  {
14863  // Remove old memTypeIndex from list of possibilities.
14864  memoryTypeBits &= ~(1u << memTypeIndex);
14865  // Find alternative memTypeIndex.
14866  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14867  if(res == VK_SUCCESS)
14868  {
14869  alignmentForMemType = VMA_MAX(
14870  vkMemReq.alignment,
14871  GetMemoryTypeMinAlignment(memTypeIndex));
14872 
14873  res = AllocateMemoryOfType(
14874  vkMemReq.size,
14875  alignmentForMemType,
14876  requiresDedicatedAllocation || prefersDedicatedAllocation,
14877  dedicatedBuffer,
14878  dedicatedImage,
14879  createInfo,
14880  memTypeIndex,
14881  suballocType,
14882  allocationCount,
14883  pAllocations);
14884  // Allocation from this alternative memory type succeeded.
14885  if(res == VK_SUCCESS)
14886  {
14887  return res;
14888  }
14889  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14890  }
14891  // No other matching memory type index could be found.
14892  else
14893  {
14894  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14895  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14896  }
14897  }
14898  }
14899  }
14900  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14901  else
14902  return res;
14903  }
14904 }
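/*
Illustrative sketch, not part of the library: the fallback loop above mirrors what
a caller can do manually with vmaFindMemoryTypeIndex(), masking out a failing type
and asking again. Assumes `allocator` and `memoryTypeBits` come from the caller.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    if(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS)
    {
        memoryTypeBits &= ~(1u << memTypeIndex); // Exclude this type and retry if allocation fails.
    }
*/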
14905 
14906 void VmaAllocator_T::FreeMemory(
14907  size_t allocationCount,
14908  const VmaAllocation* pAllocations)
14909 {
14910  VMA_ASSERT(pAllocations);
14911 
14912  for(size_t allocIndex = allocationCount; allocIndex--; )
14913  {
14914  VmaAllocation allocation = pAllocations[allocIndex];
14915 
14916  if(allocation != VK_NULL_HANDLE)
14917  {
14918  if(TouchAllocation(allocation))
14919  {
14920  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14921  {
14922  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14923  }
14924 
14925  switch(allocation->GetType())
14926  {
14927  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14928  {
14929  VmaBlockVector* pBlockVector = VMA_NULL;
14930  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14931  if(hPool != VK_NULL_HANDLE)
14932  {
14933  pBlockVector = &hPool->m_BlockVector;
14934  }
14935  else
14936  {
14937  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14938  pBlockVector = m_pBlockVectors[memTypeIndex];
14939  }
14940  pBlockVector->Free(allocation);
14941  }
14942  break;
14943  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14944  FreeDedicatedMemory(allocation);
14945  break;
14946  default:
14947  VMA_ASSERT(0);
14948  }
14949  }
14950 
14951  allocation->SetUserData(this, VMA_NULL);
14952  allocation->Dtor();
14953  m_AllocationObjectAllocator.Free(allocation);
14954  }
14955  }
14956 }
14957 
14958 VkResult VmaAllocator_T::ResizeAllocation(
14959  const VmaAllocation alloc,
14960  VkDeviceSize newSize)
14961 {
14962  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14963  {
14964  return VK_ERROR_VALIDATION_FAILED_EXT;
14965  }
14966  if(newSize == alloc->GetSize())
14967  {
14968  return VK_SUCCESS;
14969  }
14970 
14971  switch(alloc->GetType())
14972  {
14973  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14974  return VK_ERROR_FEATURE_NOT_PRESENT;
14975  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14976  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14977  {
14978  alloc->ChangeSize(newSize);
14979  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14980  return VK_SUCCESS;
14981  }
14982  else
14983  {
14984  return VK_ERROR_OUT_OF_POOL_MEMORY;
14985  }
14986  default:
14987  VMA_ASSERT(0);
14988  return VK_ERROR_VALIDATION_FAILED_EXT;
14989  }
14990 }
14991 
14992 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14993 {
14994  // Initialize.
14995  InitStatInfo(pStats->total);
14996  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14997  InitStatInfo(pStats->memoryType[i]);
14998  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14999  InitStatInfo(pStats->memoryHeap[i]);
15000 
15001  // Process default pools.
15002  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15003  {
15004  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15005  VMA_ASSERT(pBlockVector);
15006  pBlockVector->AddStats(pStats);
15007  }
15008 
15009  // Process custom pools.
15010  {
15011  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15012  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15013  {
15014  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15015  }
15016  }
15017 
15018  // Process dedicated allocations.
15019  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15020  {
15021  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15022  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15023  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15024  VMA_ASSERT(pDedicatedAllocVector);
15025  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15026  {
15027  VmaStatInfo allocationStatInfo;
15028  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15029  VmaAddStatInfo(pStats->total, allocationStatInfo);
15030  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15031  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15032  }
15033  }
15034 
15035  // Postprocess.
15036  VmaPostprocessCalcStatInfo(pStats->total);
15037  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15038  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15039  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15040  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15041 }
15042 
15043 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15044 
15045 VkResult VmaAllocator_T::DefragmentationBegin(
15046  const VmaDefragmentationInfo2& info,
15047  VmaDefragmentationStats* pStats,
15048  VmaDefragmentationContext* pContext)
15049 {
15050  if(info.pAllocationsChanged != VMA_NULL)
15051  {
15052  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15053  }
15054 
15055  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15056  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15057 
15058  (*pContext)->AddPools(info.poolCount, info.pPools);
15059  (*pContext)->AddAllocations(
15060  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15061 
15062  VkResult res = (*pContext)->Defragment(
15063  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15064  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15065  info.commandBuffer, pStats);
15066 
15067  if(res != VK_NOT_READY)
15068  {
15069  vma_delete(this, *pContext);
15070  *pContext = VMA_NULL;
15071  }
15072 
15073  return res;
15074 }
15075 
15076 VkResult VmaAllocator_T::DefragmentationEnd(
15077  VmaDefragmentationContext context)
15078 {
15079  vma_delete(this, context);
15080  return VK_SUCCESS;
15081 }
15082 
15083 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15084 {
15085  if(hAllocation->CanBecomeLost())
15086  {
15087  /*
15088  Warning: This is a carefully designed algorithm.
15089  Do not modify unless you really know what you're doing :)
15090  */
15091  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15092  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15093  for(;;)
15094  {
15095  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15096  {
15097  pAllocationInfo->memoryType = UINT32_MAX;
15098  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15099  pAllocationInfo->offset = 0;
15100  pAllocationInfo->size = hAllocation->GetSize();
15101  pAllocationInfo->pMappedData = VMA_NULL;
15102  pAllocationInfo->pUserData = hAllocation->GetUserData();
15103  return;
15104  }
15105  else if(localLastUseFrameIndex == localCurrFrameIndex)
15106  {
15107  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15108  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15109  pAllocationInfo->offset = hAllocation->GetOffset();
15110  pAllocationInfo->size = hAllocation->GetSize();
15111  pAllocationInfo->pMappedData = VMA_NULL;
15112  pAllocationInfo->pUserData = hAllocation->GetUserData();
15113  return;
15114  }
15115  else // Last use time earlier than current time.
15116  {
15117  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15118  {
15119  localLastUseFrameIndex = localCurrFrameIndex;
15120  }
15121  }
15122  }
15123  }
15124  else
15125  {
15126 #if VMA_STATS_STRING_ENABLED
15127  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15128  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15129  for(;;)
15130  {
15131  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15132  if(localLastUseFrameIndex == localCurrFrameIndex)
15133  {
15134  break;
15135  }
15136  else // Last use time earlier than current time.
15137  {
15138  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15139  {
15140  localLastUseFrameIndex = localCurrFrameIndex;
15141  }
15142  }
15143  }
15144 #endif
15145 
15146  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15147  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15148  pAllocationInfo->offset = hAllocation->GetOffset();
15149  pAllocationInfo->size = hAllocation->GetSize();
15150  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15151  pAllocationInfo->pUserData = hAllocation->GetUserData();
15152  }
15153 }
15154 
15155 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15156 {
15157  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15158  if(hAllocation->CanBecomeLost())
15159  {
15160  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15161  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15162  for(;;)
15163  {
15164  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15165  {
15166  return false;
15167  }
15168  else if(localLastUseFrameIndex == localCurrFrameIndex)
15169  {
15170  return true;
15171  }
15172  else // Last use time earlier than current time.
15173  {
15174  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15175  {
15176  localLastUseFrameIndex = localCurrFrameIndex;
15177  }
15178  }
15179  }
15180  }
15181  else
15182  {
15183 #if VMA_STATS_STRING_ENABLED
15184  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15185  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15186  for(;;)
15187  {
15188  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15189  if(localLastUseFrameIndex == localCurrFrameIndex)
15190  {
15191  break;
15192  }
15193  else // Last use time earlier than current time.
15194  {
15195  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15196  {
15197  localLastUseFrameIndex = localCurrFrameIndex;
15198  }
15199  }
15200  }
15201 #endif
15202 
15203  return true;
15204  }
15205 }
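/*
TouchAllocation() above advances the allocation's last-use frame index with a
lock-free compare-exchange loop. A standalone sketch of the same pattern,
assuming a std::atomic<uint32_t> counter and a sentinel `lostValue`:

    bool Touch(std::atomic<uint32_t>& lastUseFrame, uint32_t currFrame, uint32_t lostValue)
    {
        uint32_t observed = lastUseFrame.load();
        for(;;)
        {
            if(observed == lostValue)
                return false;  // allocation was already lost
            if(observed == currFrame)
                return true;   // already touched during this frame
            // Try to advance to the current frame. On failure, `observed` is
            // refreshed with the latest value and the loop re-evaluates.
            if(lastUseFrame.compare_exchange_weak(observed, currFrame))
                return true;
        }
    }
*/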
15206 
15207 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15208 {
15209  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15210 
15211  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15212 
15213  if(newCreateInfo.maxBlockCount == 0)
15214  {
15215  newCreateInfo.maxBlockCount = SIZE_MAX;
15216  }
15217  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15218  {
15219  return VK_ERROR_INITIALIZATION_FAILED;
15220  }
15221 
15222  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15223 
15224  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15225 
15226  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15227  if(res != VK_SUCCESS)
15228  {
15229  vma_delete(this, *pPool);
15230  *pPool = VMA_NULL;
15231  return res;
15232  }
15233 
15234  // Add to m_Pools.
15235  {
15236  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15237  (*pPool)->SetId(m_NextPoolId++);
15238  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15239  }
15240 
15241  return VK_SUCCESS;
15242 }
15243 
15244 void VmaAllocator_T::DestroyPool(VmaPool pool)
15245 {
15246  // Remove from m_Pools.
15247  {
15248  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15249  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15250  VMA_ASSERT(success && "Pool not found in Allocator.");
15251  }
15252 
15253  vma_delete(this, pool);
15254 }
15255 
15256 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15257 {
15258  pool->m_BlockVector.GetPoolStats(pPoolStats);
15259 }
15260 
15261 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15262 {
15263  m_CurrentFrameIndex.store(frameIndex);
15264 }
15265 
15266 void VmaAllocator_T::MakePoolAllocationsLost(
15267  VmaPool hPool,
15268  size_t* pLostAllocationCount)
15269 {
15270  hPool->m_BlockVector.MakePoolAllocationsLost(
15271  m_CurrentFrameIndex.load(),
15272  pLostAllocationCount);
15273 }
15274 
15275 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15276 {
15277  return hPool->m_BlockVector.CheckCorruption();
15278 }
15279 
15280 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15281 {
15282  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15283 
15284  // Process default pools.
15285  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15286  {
15287  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15288  {
15289  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15290  VMA_ASSERT(pBlockVector);
15291  VkResult localRes = pBlockVector->CheckCorruption();
15292  switch(localRes)
15293  {
15294  case VK_ERROR_FEATURE_NOT_PRESENT:
15295  break;
15296  case VK_SUCCESS:
15297  finalRes = VK_SUCCESS;
15298  break;
15299  default:
15300  return localRes;
15301  }
15302  }
15303  }
15304 
15305  // Process custom pools.
15306  {
15307  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15308  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15309  {
15310  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15311  {
15312  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15313  switch(localRes)
15314  {
15315  case VK_ERROR_FEATURE_NOT_PRESENT:
15316  break;
15317  case VK_SUCCESS:
15318  finalRes = VK_SUCCESS;
15319  break;
15320  default:
15321  return localRes;
15322  }
15323  }
15324  }
15325  }
15326 
15327  return finalRes;
15328 }
15329 
15330 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15331 {
15332  *pAllocation = m_AllocationObjectAllocator.Allocate();
15333  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15334  (*pAllocation)->InitLost();
15335 }
15336 
15337 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15338 {
15339  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15340 
15341  VkResult res;
15342  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15343  {
15344  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15345  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15346  {
15347  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15348  if(res == VK_SUCCESS)
15349  {
15350  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15351  }
15352  }
15353  else
15354  {
15355  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15356  }
15357  }
15358  else
15359  {
15360  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15361  }
15362 
15363  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15364  {
15365  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15366  }
15367 
15368  return res;
15369 }
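/*
Sketch of the heap-budget bookkeeping used above, with a hypothetical
`remaining` variable standing in for m_HeapSizeLimit[heapIndex]; all access
happens under m_HeapSizeLimitMutex:

    // On allocation:
    if(remaining < allocInfo.allocationSize)
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;      // would exceed the user-set limit
    if(vkAllocateMemory(device, &allocInfo, NULL, &memory) == VK_SUCCESS)
        remaining -= allocInfo.allocationSize;     // charge the budget

    // On free (see FreeVulkanMemory below):
    remaining += size;                             // refund the budget
*/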
15370 
15371 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15372 {
15373  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15374  {
15375  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15376  }
15377 
15378  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15379 
15380  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15381  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15382  {
15383  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15384  m_HeapSizeLimit[heapIndex] += size;
15385  }
15386 }
15387 
15388 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15389 {
15390  if(hAllocation->CanBecomeLost())
15391  {
15392  return VK_ERROR_MEMORY_MAP_FAILED;
15393  }
15394 
15395  switch(hAllocation->GetType())
15396  {
15397  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15398  {
15399  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15400  char *pBytes = VMA_NULL;
15401  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15402  if(res == VK_SUCCESS)
15403  {
15404  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15405  hAllocation->BlockAllocMap();
15406  }
15407  return res;
15408  }
15409  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15410  return hAllocation->DedicatedAllocMap(this, ppData);
15411  default:
15412  VMA_ASSERT(0);
15413  return VK_ERROR_MEMORY_MAP_FAILED;
15414  }
15415 }
15416 
15417 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15418 {
15419  switch(hAllocation->GetType())
15420  {
15421  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15422  {
15423  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15424  hAllocation->BlockAllocUnmap();
15425  pBlock->Unmap(this, 1);
15426  }
15427  break;
15428  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15429  hAllocation->DedicatedAllocUnmap(this);
15430  break;
15431  default:
15432  VMA_ASSERT(0);
15433  }
15434 }
15435 
15436 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15437 {
15438  VkResult res = VK_SUCCESS;
15439  switch(hAllocation->GetType())
15440  {
15441  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15442  res = GetVulkanFunctions().vkBindBufferMemory(
15443  m_hDevice,
15444  hBuffer,
15445  hAllocation->GetMemory(),
15446  0); //memoryOffset
15447  break;
15448  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15449  {
15450  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15451  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15452  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15453  break;
15454  }
15455  default:
15456  VMA_ASSERT(0);
15457  }
15458  return res;
15459 }
15460 
15461 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15462 {
15463  VkResult res = VK_SUCCESS;
15464  switch(hAllocation->GetType())
15465  {
15466  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15467  res = GetVulkanFunctions().vkBindImageMemory(
15468  m_hDevice,
15469  hImage,
15470  hAllocation->GetMemory(),
15471  0); //memoryOffset
15472  break;
15473  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15474  {
15475  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15476  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15477  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15478  break;
15479  }
15480  default:
15481  VMA_ASSERT(0);
15482  }
15483  return res;
15484 }
15485 
15486 void VmaAllocator_T::FlushOrInvalidateAllocation(
15487  VmaAllocation hAllocation,
15488  VkDeviceSize offset, VkDeviceSize size,
15489  VMA_CACHE_OPERATION op)
15490 {
15491  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15492  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15493  {
15494  const VkDeviceSize allocationSize = hAllocation->GetSize();
15495  VMA_ASSERT(offset <= allocationSize);
15496 
15497  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15498 
15499  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15500  memRange.memory = hAllocation->GetMemory();
15501 
15502  switch(hAllocation->GetType())
15503  {
15504  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15505  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15506  if(size == VK_WHOLE_SIZE)
15507  {
15508  memRange.size = allocationSize - memRange.offset;
15509  }
15510  else
15511  {
15512  VMA_ASSERT(offset + size <= allocationSize);
15513  memRange.size = VMA_MIN(
15514  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15515  allocationSize - memRange.offset);
15516  }
15517  break;
15518 
15519  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15520  {
15521  // 1. Still within this allocation.
15522  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15523  if(size == VK_WHOLE_SIZE)
15524  {
15525  size = allocationSize - offset;
15526  }
15527  else
15528  {
15529  VMA_ASSERT(offset + size <= allocationSize);
15530  }
15531  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15532 
15533  // 2. Adjust to whole block.
15534  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15535  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15536  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15537  memRange.offset += allocationOffset;
15538  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15539 
15540  break;
15541  }
15542 
15543  default:
15544  VMA_ASSERT(0);
15545  }
15546 
15547  switch(op)
15548  {
15549  case VMA_CACHE_FLUSH:
15550  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15551  break;
15552  case VMA_CACHE_INVALIDATE:
15553  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15554  break;
15555  default:
15556  VMA_ASSERT(0);
15557  }
15558  }
15559  // else: Just ignore this call.
15560 }
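/*
Worked example of the nonCoherentAtomSize rounding above for a block
allocation. Assume nonCoherentAtomSize = 64, the allocation starts at byte
256 within its block, and the caller flushes offset = 10, size = 100:

    memRange.offset = VmaAlignDown(10, 64)            =   0
    memRange.size   = VmaAlignUp(100 + (10 - 0), 64)  = 128
    memRange.offset += 256                            ->  256
    memRange.size   = min(128, blockSize - 256)

The range passed to vkFlushMappedMemoryRanges() therefore covers all
requested bytes while satisfying the alignment the Vulkan spec requires.
*/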
15561 
15562 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15563 {
15564  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15565 
15566  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15567  {
15568  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15569  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15570  VMA_ASSERT(pDedicatedAllocations);
15571  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15572  VMA_ASSERT(success);
15573  }
15574 
15575  VkDeviceMemory hMemory = allocation->GetMemory();
15576 
15577  /*
15578  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15579  before vkFreeMemory.
15580 
15581  if(allocation->GetMappedData() != VMA_NULL)
15582  {
15583  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15584  }
15585  */
15586 
15587  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15588 
15589  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15590 }
15591 
15592 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15593 {
15594  VkBufferCreateInfo dummyBufCreateInfo;
15595  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15596 
15597  uint32_t memoryTypeBits = 0;
15598 
15599  // Create buffer.
15600  VkBuffer buf = VK_NULL_HANDLE;
15601  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15602  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15603  if(res == VK_SUCCESS)
15604  {
15605  // Query for supported memory types.
15606  VkMemoryRequirements memReq;
15607  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15608  memoryTypeBits = memReq.memoryTypeBits;
15609 
15610  // Destroy buffer.
15611  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15612  }
15613 
15614  return memoryTypeBits;
15615 }
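/*
The temporary buffer above is a standard Vulkan idiom for discovering
memoryTypeBits before any real resource exists. A minimal app-side sketch of
the same trick (`device` is assumed to be a valid VkDevice):

    VkBufferCreateInfo info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    info.size = 0x10000;                          // arbitrary representative size
    info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    if(vkCreateBuffer(device, &info, NULL, &buf) == VK_SUCCESS)
    {
        VkMemoryRequirements req;
        vkGetBufferMemoryRequirements(device, buf, &req);
        uint32_t supportedTypes = req.memoryTypeBits; // the answer we came for
        vkDestroyBuffer(device, buf, NULL);
    }
*/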
15616 
15617 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15618 {
15619  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15620  !hAllocation->CanBecomeLost() &&
15621  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15622  {
15623  void* pData = VMA_NULL;
15624  VkResult res = Map(hAllocation, &pData);
15625  if(res == VK_SUCCESS)
15626  {
15627  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15628  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15629  Unmap(hAllocation);
15630  }
15631  else
15632  {
15633  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15634  }
15635  }
15636 }
15637 
15638 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
15639 {
15640  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
15641  if(memoryTypeBits == UINT32_MAX)
15642  {
15643  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
15644  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
15645  }
15646  return memoryTypeBits;
15647 }
15648 
15649 #if VMA_STATS_STRING_ENABLED
15650 
15651 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15652 {
15653  bool dedicatedAllocationsStarted = false;
15654  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15655  {
15656  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15657  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15658  VMA_ASSERT(pDedicatedAllocVector);
15659  if(pDedicatedAllocVector->empty() == false)
15660  {
15661  if(dedicatedAllocationsStarted == false)
15662  {
15663  dedicatedAllocationsStarted = true;
15664  json.WriteString("DedicatedAllocations");
15665  json.BeginObject();
15666  }
15667 
15668  json.BeginString("Type ");
15669  json.ContinueString(memTypeIndex);
15670  json.EndString();
15671 
15672  json.BeginArray();
15673 
15674  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15675  {
15676  json.BeginObject(true);
15677  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15678  hAlloc->PrintParameters(json);
15679  json.EndObject();
15680  }
15681 
15682  json.EndArray();
15683  }
15684  }
15685  if(dedicatedAllocationsStarted)
15686  {
15687  json.EndObject();
15688  }
15689 
15690  {
15691  bool allocationsStarted = false;
15692  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15693  {
15694  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15695  {
15696  if(allocationsStarted == false)
15697  {
15698  allocationsStarted = true;
15699  json.WriteString("DefaultPools");
15700  json.BeginObject();
15701  }
15702 
15703  json.BeginString("Type ");
15704  json.ContinueString(memTypeIndex);
15705  json.EndString();
15706 
15707  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15708  }
15709  }
15710  if(allocationsStarted)
15711  {
15712  json.EndObject();
15713  }
15714  }
15715 
15716  // Custom pools
15717  {
15718  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15719  const size_t poolCount = m_Pools.size();
15720  if(poolCount > 0)
15721  {
15722  json.WriteString("Pools");
15723  json.BeginObject();
15724  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15725  {
15726  json.BeginString();
15727  json.ContinueString(m_Pools[poolIndex]->GetId());
15728  json.EndString();
15729 
15730  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15731  }
15732  json.EndObject();
15733  }
15734  }
15735 }
15736 
15737 #endif // #if VMA_STATS_STRING_ENABLED
15738 
15739 ////////////////////////////////////////////////////////////////////////////////
15740 // Public interface
15741 
15742 VkResult vmaCreateAllocator(
15743  const VmaAllocatorCreateInfo* pCreateInfo,
15744  VmaAllocator* pAllocator)
15745 {
15746  VMA_ASSERT(pCreateInfo && pAllocator);
15747  VMA_DEBUG_LOG("vmaCreateAllocator");
15748  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15749  return (*pAllocator)->Init(pCreateInfo);
15750 }
15751 
15752 void vmaDestroyAllocator(
15753  VmaAllocator allocator)
15754 {
15755  if(allocator != VK_NULL_HANDLE)
15756  {
15757  VMA_DEBUG_LOG("vmaDestroyAllocator");
15758  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15759  vma_delete(&allocationCallbacks, allocator);
15760  }
15761 }
15762 
15763 void vmaGetPhysicalDeviceProperties(
15764  VmaAllocator allocator,
15765  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15766 {
15767  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15768  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15769 }
15770 
15771 void vmaGetMemoryProperties(
15772  VmaAllocator allocator,
15773  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15774 {
15775  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15776  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15777 }
15778 
15779 void vmaGetMemoryTypeProperties(
15780  VmaAllocator allocator,
15781  uint32_t memoryTypeIndex,
15782  VkMemoryPropertyFlags* pFlags)
15783 {
15784  VMA_ASSERT(allocator && pFlags);
15785  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15786  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15787 }
15788 
15789 void vmaSetCurrentFrameIndex(
15790  VmaAllocator allocator,
15791  uint32_t frameIndex)
15792 {
15793  VMA_ASSERT(allocator);
15794  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15795 
15796  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15797 
15798  allocator->SetCurrentFrameIndex(frameIndex);
15799 }
15800 
15801 void vmaCalculateStats(
15802  VmaAllocator allocator,
15803  VmaStats* pStats)
15804 {
15805  VMA_ASSERT(allocator && pStats);
15806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15807  allocator->CalculateStats(pStats);
15808 }
15809 
15810 #if VMA_STATS_STRING_ENABLED
15811 
15812 void vmaBuildStatsString(
15813  VmaAllocator allocator,
15814  char** ppStatsString,
15815  VkBool32 detailedMap)
15816 {
15817  VMA_ASSERT(allocator && ppStatsString);
15818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15819 
15820  VmaStringBuilder sb(allocator);
15821  {
15822  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15823  json.BeginObject();
15824 
15825  VmaStats stats;
15826  allocator->CalculateStats(&stats);
15827 
15828  json.WriteString("Total");
15829  VmaPrintStatInfo(json, stats.total);
15830 
15831  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15832  {
15833  json.BeginString("Heap ");
15834  json.ContinueString(heapIndex);
15835  json.EndString();
15836  json.BeginObject();
15837 
15838  json.WriteString("Size");
15839  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15840 
15841  json.WriteString("Flags");
15842  json.BeginArray(true);
15843  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15844  {
15845  json.WriteString("DEVICE_LOCAL");
15846  }
15847  json.EndArray();
15848 
15849  if(stats.memoryHeap[heapIndex].blockCount > 0)
15850  {
15851  json.WriteString("Stats");
15852  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15853  }
15854 
15855  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15856  {
15857  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15858  {
15859  json.BeginString("Type ");
15860  json.ContinueString(typeIndex);
15861  json.EndString();
15862 
15863  json.BeginObject();
15864 
15865  json.WriteString("Flags");
15866  json.BeginArray(true);
15867  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15868  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15869  {
15870  json.WriteString("DEVICE_LOCAL");
15871  }
15872  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15873  {
15874  json.WriteString("HOST_VISIBLE");
15875  }
15876  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15877  {
15878  json.WriteString("HOST_COHERENT");
15879  }
15880  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15881  {
15882  json.WriteString("HOST_CACHED");
15883  }
15884  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15885  {
15886  json.WriteString("LAZILY_ALLOCATED");
15887  }
15888  json.EndArray();
15889 
15890  if(stats.memoryType[typeIndex].blockCount > 0)
15891  {
15892  json.WriteString("Stats");
15893  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15894  }
15895 
15896  json.EndObject();
15897  }
15898  }
15899 
15900  json.EndObject();
15901  }
15902  if(detailedMap == VK_TRUE)
15903  {
15904  allocator->PrintDetailedMap(json);
15905  }
15906 
15907  json.EndObject();
15908  }
15909 
15910  const size_t len = sb.GetLength();
15911  char* const pChars = vma_new_array(allocator, char, len + 1);
15912  if(len > 0)
15913  {
15914  memcpy(pChars, sb.GetData(), len);
15915  }
15916  pChars[len] = '\0';
15917  *ppStatsString = pChars;
15918 }
15919 
15920 void vmaFreeStatsString(
15921  VmaAllocator allocator,
15922  char* pStatsString)
15923 {
15924  if(pStatsString != VMA_NULL)
15925  {
15926  VMA_ASSERT(allocator);
15927  size_t len = strlen(pStatsString);
15928  vma_delete_array(allocator, pStatsString, len + 1);
15929  }
15930 }
15931 
15932 #endif // #if VMA_STATS_STRING_ENABLED
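/*
Typical use of the pair above (a sketch; `allocator` is assumed to be a valid
VmaAllocator and error handling is omitted):

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailed JSON map
    printf("%s\n", statsString);                           // or dump to a file
    vmaFreeStatsString(allocator, statsString);            // free via the same allocator
*/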
15933 
15934 /*
15935 This function is not protected by any mutex because it just reads immutable data.
15936 */
15937 VkResult vmaFindMemoryTypeIndex(
15938  VmaAllocator allocator,
15939  uint32_t memoryTypeBits,
15940  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15941  uint32_t* pMemoryTypeIndex)
15942 {
15943  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15944  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15945  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15946 
15947  if(pAllocationCreateInfo->memoryTypeBits != 0)
15948  {
15949  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15950  }
15951 
15952  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15953  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15954 
15955  // Convert usage to requiredFlags and preferredFlags.
15956  switch(pAllocationCreateInfo->usage)
15957  {
15958  case VMA_MEMORY_USAGE_UNKNOWN:
15959  break;
15960  case VMA_MEMORY_USAGE_GPU_ONLY:
15961  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15962  {
15963  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15964  }
15965  break;
15966  case VMA_MEMORY_USAGE_CPU_ONLY:
15967  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15968  break;
15969  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15970  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15971  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15972  {
15973  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15974  }
15975  break;
15976  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15977  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15978  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15979  break;
15980  default:
15981  break;
15982  }
15983 
15984  *pMemoryTypeIndex = UINT32_MAX;
15985  uint32_t minCost = UINT32_MAX;
15986  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15987  memTypeIndex < allocator->GetMemoryTypeCount();
15988  ++memTypeIndex, memTypeBit <<= 1)
15989  {
15990  // This memory type is acceptable according to memoryTypeBits bitmask.
15991  if((memTypeBit & memoryTypeBits) != 0)
15992  {
15993  const VkMemoryPropertyFlags currFlags =
15994  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15995  // This memory type contains requiredFlags.
15996  if((requiredFlags & ~currFlags) == 0)
15997  {
15998  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15999  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
16000  // Remember memory type with lowest cost.
16001  if(currCost < minCost)
16002  {
16003  *pMemoryTypeIndex = memTypeIndex;
16004  if(currCost == 0)
16005  {
16006  return VK_SUCCESS;
16007  }
16008  minCost = currCost;
16009  }
16010  }
16011  }
16012  }
16013  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16014 }
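/*
Example use of the cost-based search above, assuming a staging resource that
must be host-visible (`allocator` is assumed valid):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex names a HOST_VISIBLE | HOST_COHERENT type
    // with the fewest missing preferred flags among all candidates.
*/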
16015 
16016 VkResult vmaFindMemoryTypeIndexForBufferInfo(
16017  VmaAllocator allocator,
16018  const VkBufferCreateInfo* pBufferCreateInfo,
16019  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16020  uint32_t* pMemoryTypeIndex)
16021 {
16022  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16023  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16024  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16025  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16026 
16027  const VkDevice hDev = allocator->m_hDevice;
16028  VkBuffer hBuffer = VK_NULL_HANDLE;
16029  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16030  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16031  if(res == VK_SUCCESS)
16032  {
16033  VkMemoryRequirements memReq = {};
16034  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16035  hDev, hBuffer, &memReq);
16036 
16037  res = vmaFindMemoryTypeIndex(
16038  allocator,
16039  memReq.memoryTypeBits,
16040  pAllocationCreateInfo,
16041  pMemoryTypeIndex);
16042 
16043  allocator->GetVulkanFunctions().vkDestroyBuffer(
16044  hDev, hBuffer, allocator->GetAllocationCallbacks());
16045  }
16046  return res;
16047 }
16048 
16049 VkResult vmaFindMemoryTypeIndexForImageInfo(
16050  VmaAllocator allocator,
16051  const VkImageCreateInfo* pImageCreateInfo,
16052  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16053  uint32_t* pMemoryTypeIndex)
16054 {
16055  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16056  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16057  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16058  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16059 
16060  const VkDevice hDev = allocator->m_hDevice;
16061  VkImage hImage = VK_NULL_HANDLE;
16062  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16063  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16064  if(res == VK_SUCCESS)
16065  {
16066  VkMemoryRequirements memReq = {};
16067  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16068  hDev, hImage, &memReq);
16069 
16070  res = vmaFindMemoryTypeIndex(
16071  allocator,
16072  memReq.memoryTypeBits,
16073  pAllocationCreateInfo,
16074  pMemoryTypeIndex);
16075 
16076  allocator->GetVulkanFunctions().vkDestroyImage(
16077  hDev, hImage, allocator->GetAllocationCallbacks());
16078  }
16079  return res;
16080 }
16081 
16082 VkResult vmaCreatePool(
16083  VmaAllocator allocator,
16084  const VmaPoolCreateInfo* pCreateInfo,
16085  VmaPool* pPool)
16086 {
16087  VMA_ASSERT(allocator && pCreateInfo && pPool);
16088 
16089  VMA_DEBUG_LOG("vmaCreatePool");
16090 
16091  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16092 
16093  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16094 
16095 #if VMA_RECORDING_ENABLED
16096  if(allocator->GetRecorder() != VMA_NULL)
16097  {
16098  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16099  }
16100 #endif
16101 
16102  return res;
16103 }
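/*
Minimal custom-pool usage (a sketch; memTypeIndex would normally come from
one of the vmaFindMemoryTypeIndex* functions above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolCreateInfo.maxBlockCount = 2;                // cap the pool at 256 MiB

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/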
16104 
16105 void vmaDestroyPool(
16106  VmaAllocator allocator,
16107  VmaPool pool)
16108 {
16109  VMA_ASSERT(allocator);
16110 
16111  if(pool == VK_NULL_HANDLE)
16112  {
16113  return;
16114  }
16115 
16116  VMA_DEBUG_LOG("vmaDestroyPool");
16117 
16118  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16119 
16120 #if VMA_RECORDING_ENABLED
16121  if(allocator->GetRecorder() != VMA_NULL)
16122  {
16123  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16124  }
16125 #endif
16126 
16127  allocator->DestroyPool(pool);
16128 }
16129 
16130 void vmaGetPoolStats(
16131  VmaAllocator allocator,
16132  VmaPool pool,
16133  VmaPoolStats* pPoolStats)
16134 {
16135  VMA_ASSERT(allocator && pool && pPoolStats);
16136 
16137  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16138 
16139  allocator->GetPoolStats(pool, pPoolStats);
16140 }
16141 
16142 void vmaMakePoolAllocationsLost(
16143  VmaAllocator allocator,
16144  VmaPool pool,
16145  size_t* pLostAllocationCount)
16146 {
16147  VMA_ASSERT(allocator && pool);
16148 
16149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16150 
16151 #if VMA_RECORDING_ENABLED
16152  if(allocator->GetRecorder() != VMA_NULL)
16153  {
16154  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16155  }
16156 #endif
16157 
16158  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16159 }
16160 
16161 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16162 {
16163  VMA_ASSERT(allocator && pool);
16164 
16165  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16166 
16167  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16168 
16169  return allocator->CheckPoolCorruption(pool);
16170 }
16171 
16172 VkResult vmaAllocateMemory(
16173  VmaAllocator allocator,
16174  const VkMemoryRequirements* pVkMemoryRequirements,
16175  const VmaAllocationCreateInfo* pCreateInfo,
16176  VmaAllocation* pAllocation,
16177  VmaAllocationInfo* pAllocationInfo)
16178 {
16179  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16180 
16181  VMA_DEBUG_LOG("vmaAllocateMemory");
16182 
16183  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16184 
16185  VkResult result = allocator->AllocateMemory(
16186  *pVkMemoryRequirements,
16187  false, // requiresDedicatedAllocation
16188  false, // prefersDedicatedAllocation
16189  VK_NULL_HANDLE, // dedicatedBuffer
16190  VK_NULL_HANDLE, // dedicatedImage
16191  *pCreateInfo,
16192  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16193  1, // allocationCount
16194  pAllocation);
16195 
16196 #if VMA_RECORDING_ENABLED
16197  if(allocator->GetRecorder() != VMA_NULL)
16198  {
16199  allocator->GetRecorder()->RecordAllocateMemory(
16200  allocator->GetCurrentFrameIndex(),
16201  *pVkMemoryRequirements,
16202  *pCreateInfo,
16203  *pAllocation);
16204  }
16205 #endif
16206 
16207  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16208  {
16209  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16210  }
16211 
16212  return result;
16213 }
16214 
16215 VkResult vmaAllocateMemoryPages(
16216  VmaAllocator allocator,
16217  const VkMemoryRequirements* pVkMemoryRequirements,
16218  const VmaAllocationCreateInfo* pCreateInfo,
16219  size_t allocationCount,
16220  VmaAllocation* pAllocations,
16221  VmaAllocationInfo* pAllocationInfo)
16222 {
16223  if(allocationCount == 0)
16224  {
16225  return VK_SUCCESS;
16226  }
16227 
16228  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16229 
16230  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16231 
16232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16233 
16234  VkResult result = allocator->AllocateMemory(
16235  *pVkMemoryRequirements,
16236  false, // requiresDedicatedAllocation
16237  false, // prefersDedicatedAllocation
16238  VK_NULL_HANDLE, // dedicatedBuffer
16239  VK_NULL_HANDLE, // dedicatedImage
16240  *pCreateInfo,
16241  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16242  allocationCount,
16243  pAllocations);
16244 
16245 #if VMA_RECORDING_ENABLED
16246  if(allocator->GetRecorder() != VMA_NULL)
16247  {
16248  allocator->GetRecorder()->RecordAllocateMemoryPages(
16249  allocator->GetCurrentFrameIndex(),
16250  *pVkMemoryRequirements,
16251  *pCreateInfo,
16252  (uint64_t)allocationCount,
16253  pAllocations);
16254  }
16255 #endif
16256 
16257  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16258  {
16259  for(size_t i = 0; i < allocationCount; ++i)
16260  {
16261  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16262  }
16263  }
16264 
16265  return result;
16266 }
16267 
16268 VkResult vmaAllocateMemoryForBuffer(
16269  VmaAllocator allocator,
16270  VkBuffer buffer,
16271  const VmaAllocationCreateInfo* pCreateInfo,
16272  VmaAllocation* pAllocation,
16273  VmaAllocationInfo* pAllocationInfo)
16274 {
16275  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16276 
16277  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16278 
16279  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16280 
16281  VkMemoryRequirements vkMemReq = {};
16282  bool requiresDedicatedAllocation = false;
16283  bool prefersDedicatedAllocation = false;
16284  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16285  requiresDedicatedAllocation,
16286  prefersDedicatedAllocation);
16287 
16288  VkResult result = allocator->AllocateMemory(
16289  vkMemReq,
16290  requiresDedicatedAllocation,
16291  prefersDedicatedAllocation,
16292  buffer, // dedicatedBuffer
16293  VK_NULL_HANDLE, // dedicatedImage
16294  *pCreateInfo,
16295  VMA_SUBALLOCATION_TYPE_BUFFER,
16296  1, // allocationCount
16297  pAllocation);
16298 
16299 #if VMA_RECORDING_ENABLED
16300  if(allocator->GetRecorder() != VMA_NULL)
16301  {
16302  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16303  allocator->GetCurrentFrameIndex(),
16304  vkMemReq,
16305  requiresDedicatedAllocation,
16306  prefersDedicatedAllocation,
16307  *pCreateInfo,
16308  *pAllocation);
16309  }
16310 #endif
16311 
16312  if(pAllocationInfo && result == VK_SUCCESS)
16313  {
16314  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16315  }
16316 
16317  return result;
16318 }
16319 
16320 VkResult vmaAllocateMemoryForImage(
16321  VmaAllocator allocator,
16322  VkImage image,
16323  const VmaAllocationCreateInfo* pCreateInfo,
16324  VmaAllocation* pAllocation,
16325  VmaAllocationInfo* pAllocationInfo)
16326 {
16327  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16328 
16329  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16330 
16331  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16332 
16333  VkMemoryRequirements vkMemReq = {};
16334  bool requiresDedicatedAllocation = false;
16335  bool prefersDedicatedAllocation = false;
16336  allocator->GetImageMemoryRequirements(image, vkMemReq,
16337  requiresDedicatedAllocation, prefersDedicatedAllocation);
16338 
16339  VkResult result = allocator->AllocateMemory(
16340  vkMemReq,
16341  requiresDedicatedAllocation,
16342  prefersDedicatedAllocation,
16343  VK_NULL_HANDLE, // dedicatedBuffer
16344  image, // dedicatedImage
16345  *pCreateInfo,
16346  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16347  1, // allocationCount
16348  pAllocation);
16349 
16350 #if VMA_RECORDING_ENABLED
16351  if(allocator->GetRecorder() != VMA_NULL)
16352  {
16353  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16354  allocator->GetCurrentFrameIndex(),
16355  vkMemReq,
16356  requiresDedicatedAllocation,
16357  prefersDedicatedAllocation,
16358  *pCreateInfo,
16359  *pAllocation);
16360  }
16361 #endif
16362 
16363  if(pAllocationInfo && result == VK_SUCCESS)
16364  {
16365  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16366  }
16367 
16368  return result;
16369 }
16370 
16371 void vmaFreeMemory(
16372  VmaAllocator allocator,
16373  VmaAllocation allocation)
16374 {
16375  VMA_ASSERT(allocator);
16376 
16377  if(allocation == VK_NULL_HANDLE)
16378  {
16379  return;
16380  }
16381 
16382  VMA_DEBUG_LOG("vmaFreeMemory");
16383 
16384  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16385 
16386 #if VMA_RECORDING_ENABLED
16387  if(allocator->GetRecorder() != VMA_NULL)
16388  {
16389  allocator->GetRecorder()->RecordFreeMemory(
16390  allocator->GetCurrentFrameIndex(),
16391  allocation);
16392  }
16393 #endif
16394 
16395  allocator->FreeMemory(
16396  1, // allocationCount
16397  &allocation);
16398 }
16399 
16400 void vmaFreeMemoryPages(
16401  VmaAllocator allocator,
16402  size_t allocationCount,
16403  VmaAllocation* pAllocations)
16404 {
16405  if(allocationCount == 0)
16406  {
16407  return;
16408  }
16409 
16410  VMA_ASSERT(allocator);
16411 
16412  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16413 
16414  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16415 
16416 #if VMA_RECORDING_ENABLED
16417  if(allocator->GetRecorder() != VMA_NULL)
16418  {
16419  allocator->GetRecorder()->RecordFreeMemoryPages(
16420  allocator->GetCurrentFrameIndex(),
16421  (uint64_t)allocationCount,
16422  pAllocations);
16423  }
16424 #endif
16425 
16426  allocator->FreeMemory(allocationCount, pAllocations);
16427 }
16428 
16429 VkResult vmaResizeAllocation(
16430  VmaAllocator allocator,
16431  VmaAllocation allocation,
16432  VkDeviceSize newSize)
16433 {
16434  VMA_ASSERT(allocator && allocation);
16435 
16436  VMA_DEBUG_LOG("vmaResizeAllocation");
16437 
16438  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16439 
16440 #if VMA_RECORDING_ENABLED
16441  if(allocator->GetRecorder() != VMA_NULL)
16442  {
16443  allocator->GetRecorder()->RecordResizeAllocation(
16444  allocator->GetCurrentFrameIndex(),
16445  allocation,
16446  newSize);
16447  }
16448 #endif
16449 
16450  return allocator->ResizeAllocation(allocation, newSize);
16451 }
16452 
16453 void vmaGetAllocationInfo(
16454  VmaAllocator allocator,
16455  VmaAllocation allocation,
16456  VmaAllocationInfo* pAllocationInfo)
16457 {
16458  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16459 
16460  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16461 
16462 #if VMA_RECORDING_ENABLED
16463  if(allocator->GetRecorder() != VMA_NULL)
16464  {
16465  allocator->GetRecorder()->RecordGetAllocationInfo(
16466  allocator->GetCurrentFrameIndex(),
16467  allocation);
16468  }
16469 #endif
16470 
16471  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16472 }
16473 
16474 VkBool32 vmaTouchAllocation(
16475  VmaAllocator allocator,
16476  VmaAllocation allocation)
16477 {
16478  VMA_ASSERT(allocator && allocation);
16479 
16480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16481 
16482 #if VMA_RECORDING_ENABLED
16483  if(allocator->GetRecorder() != VMA_NULL)
16484  {
16485  allocator->GetRecorder()->RecordTouchAllocation(
16486  allocator->GetCurrentFrameIndex(),
16487  allocation);
16488  }
16489 #endif
16490 
16491  return allocator->TouchAllocation(allocation);
16492 }
16493 
16494 void vmaSetAllocationUserData(
16495  VmaAllocator allocator,
16496  VmaAllocation allocation,
16497  void* pUserData)
16498 {
16499  VMA_ASSERT(allocator && allocation);
16500 
16501  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16502 
16503  allocation->SetUserData(allocator, pUserData);
16504 
16505 #if VMA_RECORDING_ENABLED
16506  if(allocator->GetRecorder() != VMA_NULL)
16507  {
16508  allocator->GetRecorder()->RecordSetAllocationUserData(
16509  allocator->GetCurrentFrameIndex(),
16510  allocation,
16511  pUserData);
16512  }
16513 #endif
16514 }
16515 
16516 void vmaCreateLostAllocation(
16517  VmaAllocator allocator,
16518  VmaAllocation* pAllocation)
16519 {
16520  VMA_ASSERT(allocator && pAllocation);
16521 
16522  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16523 
16524  allocator->CreateLostAllocation(pAllocation);
16525 
16526 #if VMA_RECORDING_ENABLED
16527  if(allocator->GetRecorder() != VMA_NULL)
16528  {
16529  allocator->GetRecorder()->RecordCreateLostAllocation(
16530  allocator->GetCurrentFrameIndex(),
16531  *pAllocation);
16532  }
16533 #endif
16534 }
16535 
16536 VkResult vmaMapMemory(
16537  VmaAllocator allocator,
16538  VmaAllocation allocation,
16539  void** ppData)
16540 {
16541  VMA_ASSERT(allocator && allocation && ppData);
16542 
16543  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16544 
16545  VkResult res = allocator->Map(allocation, ppData);
16546 
16547 #if VMA_RECORDING_ENABLED
16548  if(allocator->GetRecorder() != VMA_NULL)
16549  {
16550  allocator->GetRecorder()->RecordMapMemory(
16551  allocator->GetCurrentFrameIndex(),
16552  allocation);
16553  }
16554 #endif
16555 
16556  return res;
16557 }
16558 
16559 void vmaUnmapMemory(
16560  VmaAllocator allocator,
16561  VmaAllocation allocation)
16562 {
16563  VMA_ASSERT(allocator && allocation);
16564 
16565  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16566 
16567 #if VMA_RECORDING_ENABLED
16568  if(allocator->GetRecorder() != VMA_NULL)
16569  {
16570  allocator->GetRecorder()->RecordUnmapMemory(
16571  allocator->GetCurrentFrameIndex(),
16572  allocation);
16573  }
16574 #endif
16575 
16576  allocator->Unmap(allocation);
16577 }
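/*
The canonical map/write/flush/unmap sequence using the functions above;
`srcData` and `srcSize` are hypothetical. The flush is skipped internally for
HOST_COHERENT memory types (see FlushOrInvalidateAllocation()):

    void* mapped = NULL;
    if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, srcSize);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
*/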
16578 
16579 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16580 {
16581  VMA_ASSERT(allocator && allocation);
16582 
16583  VMA_DEBUG_LOG("vmaFlushAllocation");
16584 
16585  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16586 
16587  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16588 
16589 #if VMA_RECORDING_ENABLED
16590  if(allocator->GetRecorder() != VMA_NULL)
16591  {
16592  allocator->GetRecorder()->RecordFlushAllocation(
16593  allocator->GetCurrentFrameIndex(),
16594  allocation, offset, size);
16595  }
16596 #endif
16597 }
16598 
16599 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16600 {
16601  VMA_ASSERT(allocator && allocation);
16602 
16603  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16604 
16605  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16606 
16607  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16608 
16609 #if VMA_RECORDING_ENABLED
16610  if(allocator->GetRecorder() != VMA_NULL)
16611  {
16612  allocator->GetRecorder()->RecordInvalidateAllocation(
16613  allocator->GetCurrentFrameIndex(),
16614  allocation, offset, size);
16615  }
16616 #endif
16617 }
16618 
16619 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16620 {
16621  VMA_ASSERT(allocator);
16622 
16623  VMA_DEBUG_LOG("vmaCheckCorruption");
16624 
16625  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16626 
16627  return allocator->CheckCorruption(memoryTypeBits);
16628 }
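/*
vmaCheckCorruption() only has something to verify when the library is built
with corruption-detection margins enabled (VMA_DEBUG_MARGIN together with
VMA_DEBUG_DETECT_CORRUPTION); a typical debug-build call:

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    // VK_SUCCESS: margins validated. VK_ERROR_FEATURE_NOT_PRESENT: nothing
    // was eligible for checking. Other errors indicate detected corruption.
*/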
16629 
16630 VkResult vmaDefragment(
16631  VmaAllocator allocator,
16632  VmaAllocation* pAllocations,
16633  size_t allocationCount,
16634  VkBool32* pAllocationsChanged,
16635  const VmaDefragmentationInfo *pDefragmentationInfo,
16636  VmaDefragmentationStats* pDefragmentationStats)
16637 {
16638  // Deprecated interface, reimplemented using the new one.
16639 
16640  VmaDefragmentationInfo2 info2 = {};
16641  info2.allocationCount = (uint32_t)allocationCount;
16642  info2.pAllocations = pAllocations;
16643  info2.pAllocationsChanged = pAllocationsChanged;
16644  if(pDefragmentationInfo != VMA_NULL)
16645  {
16646  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16647  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16648  }
16649  else
16650  {
16651  info2.maxCpuAllocationsToMove = UINT32_MAX;
16652  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16653  }
16654  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, and commandBuffer are deliberately left zero.
16655 
16656  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16657  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16658  if(res == VK_NOT_READY)
16659  {
16660  res = vmaDefragmentationEnd(allocator, ctx);
16661  }
16662  return res;
16663 }
16664 
16665 VkResult vmaDefragmentationBegin(
16666  VmaAllocator allocator,
16667  const VmaDefragmentationInfo2* pInfo,
16668  VmaDefragmentationStats* pStats,
16669  VmaDefragmentationContext *pContext)
16670 {
16671  VMA_ASSERT(allocator && pInfo && pContext);
16672 
16673  // Degenerate case: Nothing to defragment.
16674  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16675  {
16676  return VK_SUCCESS;
16677  }
16678 
16679  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16680  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16681  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16682  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16683 
16684  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16685 
16686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16687 
16688  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16689 
16690 #if VMA_RECORDING_ENABLED
16691  if(allocator->GetRecorder() != VMA_NULL)
16692  {
16693  allocator->GetRecorder()->RecordDefragmentationBegin(
16694  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16695  }
16696 #endif
16697 
16698  return res;
16699 }
16700 
16701 VkResult vmaDefragmentationEnd(
16702  VmaAllocator allocator,
16703  VmaDefragmentationContext context)
16704 {
16705  VMA_ASSERT(allocator);
16706 
16707  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16708 
16709  if(context != VK_NULL_HANDLE)
16710  {
16711  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16712 
16713 #if VMA_RECORDING_ENABLED
16714  if(allocator->GetRecorder() != VMA_NULL)
16715  {
16716  allocator->GetRecorder()->RecordDefragmentationEnd(
16717  allocator->GetCurrentFrameIndex(), context);
16718  }
16719 #endif
16720 
16721  return allocator->DefragmentationEnd(context);
16722  }
16723  else
16724  {
16725  return VK_SUCCESS;
16726  }
16727 }
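/*
Sketch of the begin/end protocol above for a CPU-only defragmentation pass,
mirroring the deprecated vmaDefragment() wrapper; GPU moves would also set
commandBuffer and the maxGpu* limits in VmaDefragmentationInfo2. `allocCount`
and `allocs` are assumed to be app-provided:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    if(res == VK_NOT_READY)
        res = vmaDefragmentationEnd(allocator, defragCtx); // finish the pass
*/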
16728 
16729 VkResult vmaBindBufferMemory(
16730  VmaAllocator allocator,
16731  VmaAllocation allocation,
16732  VkBuffer buffer)
16733 {
16734  VMA_ASSERT(allocator && allocation && buffer);
16735 
16736  VMA_DEBUG_LOG("vmaBindBufferMemory");
16737 
16738  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16739 
16740  return allocator->BindBufferMemory(allocation, buffer);
16741 }
16742 
16743 VkResult vmaBindImageMemory(
16744  VmaAllocator allocator,
16745  VmaAllocation allocation,
16746  VkImage image)
16747 {
16748  VMA_ASSERT(allocator && allocation && image);
16749 
16750  VMA_DEBUG_LOG("vmaBindImageMemory");
16751 
16752  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16753 
16754  return allocator->BindImageMemory(allocation, image);
16755 }
16756 
16757 VkResult vmaCreateBuffer(
16758  VmaAllocator allocator,
16759  const VkBufferCreateInfo* pBufferCreateInfo,
16760  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16761  VkBuffer* pBuffer,
16762  VmaAllocation* pAllocation,
16763  VmaAllocationInfo* pAllocationInfo)
16764 {
16765  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16766 
16767  if(pBufferCreateInfo->size == 0)
16768  {
16769  return VK_ERROR_VALIDATION_FAILED_EXT;
16770  }
16771 
16772  VMA_DEBUG_LOG("vmaCreateBuffer");
16773 
16774  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16775 
16776  *pBuffer = VK_NULL_HANDLE;
16777  *pAllocation = VK_NULL_HANDLE;
16778 
16779  // 1. Create VkBuffer.
16780  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16781  allocator->m_hDevice,
16782  pBufferCreateInfo,
16783  allocator->GetAllocationCallbacks(),
16784  pBuffer);
16785  if(res >= 0)
16786  {
16787  // 2. Query memory requirements via vkGetBufferMemoryRequirements.
16788  VkMemoryRequirements vkMemReq = {};
16789  bool requiresDedicatedAllocation = false;
16790  bool prefersDedicatedAllocation = false;
16791  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16792  requiresDedicatedAllocation, prefersDedicatedAllocation);
16793 
16794  // Make sure the alignment requirements for specific buffer usages, as reported
16795  // in VkPhysicalDeviceLimits, are covered by the alignment reported by vkGetBufferMemoryRequirements.
16796  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16797  {
16798  VMA_ASSERT(vkMemReq.alignment %
16799  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16800  }
16801  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16802  {
16803  VMA_ASSERT(vkMemReq.alignment %
16804  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16805  }
16806  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16807  {
16808  VMA_ASSERT(vkMemReq.alignment %
16809  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16810  }
16811 
16812  // 3. Allocate memory using allocator.
16813  res = allocator->AllocateMemory(
16814  vkMemReq,
16815  requiresDedicatedAllocation,
16816  prefersDedicatedAllocation,
16817  *pBuffer, // dedicatedBuffer
16818  VK_NULL_HANDLE, // dedicatedImage
16819  *pAllocationCreateInfo,
16820  VMA_SUBALLOCATION_TYPE_BUFFER,
16821  1, // allocationCount
16822  pAllocation);
16823 
16824 #if VMA_RECORDING_ENABLED
16825  if(allocator->GetRecorder() != VMA_NULL)
16826  {
16827  allocator->GetRecorder()->RecordCreateBuffer(
16828  allocator->GetCurrentFrameIndex(),
16829  *pBufferCreateInfo,
16830  *pAllocationCreateInfo,
16831  *pAllocation);
16832  }
16833 #endif
16834 
16835  if(res >= 0)
16836  {
16837  // 4. Bind buffer to memory.
16838  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16839  {
16840  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16841  }
16842  if(res >= 0)
16843  {
16844  // All steps succeeded.
16845  #if VMA_STATS_STRING_ENABLED
16846  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16847  #endif
16848  if(pAllocationInfo != VMA_NULL)
16849  {
16850  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16851  }
16852 
16853  return VK_SUCCESS;
16854  }
16855  allocator->FreeMemory(
16856  1, // allocationCount
16857  pAllocation);
16858  *pAllocation = VK_NULL_HANDLE;
16859  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16860  *pBuffer = VK_NULL_HANDLE;
16861  return res;
16862  }
16863  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16864  *pBuffer = VK_NULL_HANDLE;
16865  return res;
16866  }
16867  return res;
16868 }
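// Example usage of vmaCreateBuffer() above, which performs create + allocate +
// bind in one call (a sketch; the size and usage flags are illustrative):
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buf = VK_NULL_HANDLE;
//     VmaAllocation alloc = VK_NULL_HANDLE;
//     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
//     // ... use the buffer ...
//     vmaDestroyBuffer(allocator, buf, alloc); // frees both the buffer and its memory;
//                                              // safe even if creation failed (both handles stay null)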
16869 
16870 void vmaDestroyBuffer(
16871  VmaAllocator allocator,
16872  VkBuffer buffer,
16873  VmaAllocation allocation)
16874 {
16875  VMA_ASSERT(allocator);
16876 
16877  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16878  {
16879  return;
16880  }
16881 
16882  VMA_DEBUG_LOG("vmaDestroyBuffer");
16883 
16884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16885 
16886 #if VMA_RECORDING_ENABLED
16887  if(allocator->GetRecorder() != VMA_NULL)
16888  {
16889  allocator->GetRecorder()->RecordDestroyBuffer(
16890  allocator->GetCurrentFrameIndex(),
16891  allocation);
16892  }
16893 #endif
16894 
16895  if(buffer != VK_NULL_HANDLE)
16896  {
16897  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16898  }
16899 
16900  if(allocation != VK_NULL_HANDLE)
16901  {
16902  allocator->FreeMemory(
16903  1, // allocationCount
16904  &allocation);
16905  }
16906 }
16907 
16908 VkResult vmaCreateImage(
16909  VmaAllocator allocator,
16910  const VkImageCreateInfo* pImageCreateInfo,
16911  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16912  VkImage* pImage,
16913  VmaAllocation* pAllocation,
16914  VmaAllocationInfo* pAllocationInfo)
16915 {
16916  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16917 
16918  if(pImageCreateInfo->extent.width == 0 ||
16919  pImageCreateInfo->extent.height == 0 ||
16920  pImageCreateInfo->extent.depth == 0 ||
16921  pImageCreateInfo->mipLevels == 0 ||
16922  pImageCreateInfo->arrayLayers == 0)
16923  {
16924  return VK_ERROR_VALIDATION_FAILED_EXT;
16925  }
16926 
16927  VMA_DEBUG_LOG("vmaCreateImage");
16928 
16929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16930 
16931  *pImage = VK_NULL_HANDLE;
16932  *pAllocation = VK_NULL_HANDLE;
16933 
16934  // 1. Create VkImage.
16935  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16936  allocator->m_hDevice,
16937  pImageCreateInfo,
16938  allocator->GetAllocationCallbacks(),
16939  pImage);
16940  if(res >= 0)
16941  {
16942  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16943  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16944  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16945 
16946  // 2. Allocate memory using allocator.
16947  VkMemoryRequirements vkMemReq = {};
16948  bool requiresDedicatedAllocation = false;
16949  bool prefersDedicatedAllocation = false;
16950  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16951  requiresDedicatedAllocation, prefersDedicatedAllocation);
16952 
16953  res = allocator->AllocateMemory(
16954  vkMemReq,
16955  requiresDedicatedAllocation,
16956  prefersDedicatedAllocation,
16957  VK_NULL_HANDLE, // dedicatedBuffer
16958  *pImage, // dedicatedImage
16959  *pAllocationCreateInfo,
16960  suballocType,
16961  1, // allocationCount
16962  pAllocation);
16963 
16964 #if VMA_RECORDING_ENABLED
16965  if(allocator->GetRecorder() != VMA_NULL)
16966  {
16967  allocator->GetRecorder()->RecordCreateImage(
16968  allocator->GetCurrentFrameIndex(),
16969  *pImageCreateInfo,
16970  *pAllocationCreateInfo,
16971  *pAllocation);
16972  }
16973 #endif
16974 
16975  if(res >= 0)
16976  {
16977  // 3. Bind image to memory.
16978  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16979  {
16980  res = allocator->BindImageMemory(*pAllocation, *pImage);
16981  }
16982  if(res >= 0)
16983  {
16984  // All steps succeeded.
16985  #if VMA_STATS_STRING_ENABLED
16986  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16987  #endif
16988  if(pAllocationInfo != VMA_NULL)
16989  {
16990  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16991  }
16992 
16993  return VK_SUCCESS;
16994  }
16995  allocator->FreeMemory(
16996  1, // allocationCount
16997  pAllocation);
16998  *pAllocation = VK_NULL_HANDLE;
16999  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17000  *pImage = VK_NULL_HANDLE;
17001  return res;
17002  }
17003  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17004  *pImage = VK_NULL_HANDLE;
17005  return res;
17006  }
17007  return res;
17008 }
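// Example usage of vmaCreateImage() above (a sketch; format, extent, and usage
// flags are illustrative):
//
//     VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//     imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//     imgCreateInfo.extent = { 1024, 1024, 1 };
//     imgCreateInfo.mipLevels = 1;
//     imgCreateInfo.arrayLayers = 1;
//     imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//     imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//     imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//     imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkImage img = VK_NULL_HANDLE;
//     VmaAllocation alloc = VK_NULL_HANDLE;
//     VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, VMA_NULL);
//     // ... use the image ...
//     vmaDestroyImage(allocator, img, alloc);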
17009 
17010 void vmaDestroyImage(
17011  VmaAllocator allocator,
17012  VkImage image,
17013  VmaAllocation allocation)
17014 {
17015  VMA_ASSERT(allocator);
17016 
17017  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17018  {
17019  return;
17020  }
17021 
17022  VMA_DEBUG_LOG("vmaDestroyImage");
17023 
17024  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17025 
17026 #if VMA_RECORDING_ENABLED
17027  if(allocator->GetRecorder() != VMA_NULL)
17028  {
17029  allocator->GetRecorder()->RecordDestroyImage(
17030  allocator->GetCurrentFrameIndex(),
17031  allocation);
17032  }
17033 #endif
17034 
17035  if(image != VK_NULL_HANDLE)
17036  {
17037  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17038  }
17039  if(allocation != VK_NULL_HANDLE)
17040  {
17041  allocator->FreeMemory(
17042  1, // allocationCount
17043  &allocation);
17044  }
17045 }
17046 
17047 #endif // #ifdef VMA_IMPLEMENTATION