Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1677 /*
1678 Define this macro to 0/1 to disable/enable support for recording functionality,
1679 available through VmaAllocatorCreateInfo::pRecordSettings.
1680 */
1681 #ifndef VMA_RECORDING_ENABLED
1682  #ifdef _WIN32
1683  #define VMA_RECORDING_ENABLED 1
1684  #else
1685  #define VMA_RECORDING_ENABLED 0
1686  #endif
1687 #endif
1688 
1689 #ifndef NOMINMAX
1690  #define NOMINMAX // For windows.h
1691 #endif
1692 
1693 #ifndef VULKAN_H_
1694  #include <vulkan/vulkan.h>
1695 #endif
1696 
1697 #if VMA_RECORDING_ENABLED
1698  #include <windows.h>
1699 #endif
1700 
1701 #if !defined(VMA_DEDICATED_ALLOCATION)
1702  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1703  #define VMA_DEDICATED_ALLOCATION 1
1704  #else
1705  #define VMA_DEDICATED_ALLOCATION 0
1706  #endif
1707 #endif
1708 
1718 VK_DEFINE_HANDLE(VmaAllocator)
1719 
1720 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1722  VmaAllocator allocator,
1723  uint32_t memoryType,
1724  VkDeviceMemory memory,
1725  VkDeviceSize size);
1727 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1728  VmaAllocator allocator,
1729  uint32_t memoryType,
1730  VkDeviceMemory memory,
1731  VkDeviceSize size);
1732 
1746 
1776 
1779 typedef VkFlags VmaAllocatorCreateFlags;
1780 
1785 typedef struct VmaVulkanFunctions {
1786  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1787  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1788  PFN_vkAllocateMemory vkAllocateMemory;
1789  PFN_vkFreeMemory vkFreeMemory;
1790  PFN_vkMapMemory vkMapMemory;
1791  PFN_vkUnmapMemory vkUnmapMemory;
1792  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1793  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1794  PFN_vkBindBufferMemory vkBindBufferMemory;
1795  PFN_vkBindImageMemory vkBindImageMemory;
1796  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1797  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1798  PFN_vkCreateBuffer vkCreateBuffer;
1799  PFN_vkDestroyBuffer vkDestroyBuffer;
1800  PFN_vkCreateImage vkCreateImage;
1801  PFN_vkDestroyImage vkDestroyImage;
1802  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1803 #if VMA_DEDICATED_ALLOCATION
1804  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1805  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1806 #endif
1808 
1810 typedef enum VmaRecordFlagBits {
1817 
1820 typedef VkFlags VmaRecordFlags;
1821 
1823 typedef struct VmaRecordSettings
1824 {
1834  const char* pFilePath;
1836 
1839 {
1843 
1844  VkPhysicalDevice physicalDevice;
1846 
1847  VkDevice device;
1849 
1852 
1853  const VkAllocationCallbacks* pAllocationCallbacks;
1855 
1895  const VkDeviceSize* pHeapSizeLimit;
1916 
1918 VkResult vmaCreateAllocator(
1919  const VmaAllocatorCreateInfo* pCreateInfo,
1920  VmaAllocator* pAllocator);
1921 
1923 void vmaDestroyAllocator(
1924  VmaAllocator allocator);
1925 
1931  VmaAllocator allocator,
1932  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1933 
1939  VmaAllocator allocator,
1940  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1941 
1949  VmaAllocator allocator,
1950  uint32_t memoryTypeIndex,
1951  VkMemoryPropertyFlags* pFlags);
1952 
1962  VmaAllocator allocator,
1963  uint32_t frameIndex);
1964 
1967 typedef struct VmaStatInfo
1968 {
1970  uint32_t blockCount;
1976  VkDeviceSize usedBytes;
1978  VkDeviceSize unusedBytes;
1981 } VmaStatInfo;
1982 
1984 typedef struct VmaStats
1985 {
1986  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1987  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1989 } VmaStats;
1990 
1992 void vmaCalculateStats(
1993  VmaAllocator allocator,
1994  VmaStats* pStats);
1995 
1996 #ifndef VMA_STATS_STRING_ENABLED
1997 #define VMA_STATS_STRING_ENABLED 1
1998 #endif
1999 
2000 #if VMA_STATS_STRING_ENABLED
2001 
2003 
2005 void vmaBuildStatsString(
2006  VmaAllocator allocator,
2007  char** ppStatsString,
2008  VkBool32 detailedMap);
2009 
2010 void vmaFreeStatsString(
2011  VmaAllocator allocator,
2012  char* pStatsString);
2013 
2014 #endif // #if VMA_STATS_STRING_ENABLED
2015 
2024 VK_DEFINE_HANDLE(VmaPool)
2025 
2026 typedef enum VmaMemoryUsage
2027 {
2076 } VmaMemoryUsage;
2077 
2087 
2148 
2164 
2174 
2181 
2185 
2187 {
2200  VkMemoryPropertyFlags requiredFlags;
2205  VkMemoryPropertyFlags preferredFlags;
2213  uint32_t memoryTypeBits;
2226  void* pUserData;
2228 
2245 VkResult vmaFindMemoryTypeIndex(
2246  VmaAllocator allocator,
2247  uint32_t memoryTypeBits,
2248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2249  uint32_t* pMemoryTypeIndex);
2250 
2264  VmaAllocator allocator,
2265  const VkBufferCreateInfo* pBufferCreateInfo,
2266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2267  uint32_t* pMemoryTypeIndex);
2268 
2282  VmaAllocator allocator,
2283  const VkImageCreateInfo* pImageCreateInfo,
2284  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2285  uint32_t* pMemoryTypeIndex);
2286 
2307 
2324 
2335 
2341 
2344 typedef VkFlags VmaPoolCreateFlags;
2345 
2348 typedef struct VmaPoolCreateInfo {
2363  VkDeviceSize blockSize;
2392 
2395 typedef struct VmaPoolStats {
2398  VkDeviceSize size;
2401  VkDeviceSize unusedSize;
2414  VkDeviceSize unusedRangeSizeMax;
2417  size_t blockCount;
2418 } VmaPoolStats;
2419 
2426 VkResult vmaCreatePool(
2427  VmaAllocator allocator,
2428  const VmaPoolCreateInfo* pCreateInfo,
2429  VmaPool* pPool);
2430 
2433 void vmaDestroyPool(
2434  VmaAllocator allocator,
2435  VmaPool pool);
2436 
2443 void vmaGetPoolStats(
2444  VmaAllocator allocator,
2445  VmaPool pool,
2446  VmaPoolStats* pPoolStats);
2447 
2455  VmaAllocator allocator,
2456  VmaPool pool,
2457  size_t* pLostAllocationCount);
2458 
2473 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2474 
2499 VK_DEFINE_HANDLE(VmaAllocation)
2500 
2501 
2503 typedef struct VmaAllocationInfo {
2508  uint32_t memoryType;
2517  VkDeviceMemory deviceMemory;
2522  VkDeviceSize offset;
2527  VkDeviceSize size;
2541  void* pUserData;
2543 
2554 VkResult vmaAllocateMemory(
2555  VmaAllocator allocator,
2556  const VkMemoryRequirements* pVkMemoryRequirements,
2557  const VmaAllocationCreateInfo* pCreateInfo,
2558  VmaAllocation* pAllocation,
2559  VmaAllocationInfo* pAllocationInfo);
2560 
2580 VkResult vmaAllocateMemoryPages(
2581  VmaAllocator allocator,
2582  const VkMemoryRequirements* pVkMemoryRequirements,
2583  const VmaAllocationCreateInfo* pCreateInfo,
2584  size_t allocationCount,
2585  VmaAllocation* pAllocations,
2586  VmaAllocationInfo* pAllocationInfo);
2587 
2595  VmaAllocator allocator,
2596  VkBuffer buffer,
2597  const VmaAllocationCreateInfo* pCreateInfo,
2598  VmaAllocation* pAllocation,
2599  VmaAllocationInfo* pAllocationInfo);
2600 
2602 VkResult vmaAllocateMemoryForImage(
2603  VmaAllocator allocator,
2604  VkImage image,
2605  const VmaAllocationCreateInfo* pCreateInfo,
2606  VmaAllocation* pAllocation,
2607  VmaAllocationInfo* pAllocationInfo);
2608 
2613 void vmaFreeMemory(
2614  VmaAllocator allocator,
2615  VmaAllocation allocation);
2616 
2627 void vmaFreeMemoryPages(
2628  VmaAllocator allocator,
2629  size_t allocationCount,
2630  VmaAllocation* pAllocations);
2631 
2652 VkResult vmaResizeAllocation(
2653  VmaAllocator allocator,
2654  VmaAllocation allocation,
2655  VkDeviceSize newSize);
2656 
2674  VmaAllocator allocator,
2675  VmaAllocation allocation,
2676  VmaAllocationInfo* pAllocationInfo);
2677 
2692 VkBool32 vmaTouchAllocation(
2693  VmaAllocator allocator,
2694  VmaAllocation allocation);
2695 
2710  VmaAllocator allocator,
2711  VmaAllocation allocation,
2712  void* pUserData);
2713 
2725  VmaAllocator allocator,
2726  VmaAllocation* pAllocation);
2727 
2762 VkResult vmaMapMemory(
2763  VmaAllocator allocator,
2764  VmaAllocation allocation,
2765  void** ppData);
2766 
2771 void vmaUnmapMemory(
2772  VmaAllocator allocator,
2773  VmaAllocation allocation);
2774 
2791 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2792 
2809 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2810 
2827 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2828 
2835 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2836 
2837 typedef enum VmaDefragmentationFlagBits {
2841 typedef VkFlags VmaDefragmentationFlags;
2842 
2847 typedef struct VmaDefragmentationInfo2 {
2871  uint32_t poolCount;
2892  VkDeviceSize maxCpuBytesToMove;
2902  VkDeviceSize maxGpuBytesToMove;
2916  VkCommandBuffer commandBuffer;
2918 
2923 typedef struct VmaDefragmentationInfo {
2928  VkDeviceSize maxBytesToMove;
2935 
2937 typedef struct VmaDefragmentationStats {
2939  VkDeviceSize bytesMoved;
2941  VkDeviceSize bytesFreed;
2947 
2977 VkResult vmaDefragmentationBegin(
2978  VmaAllocator allocator,
2979  const VmaDefragmentationInfo2* pInfo,
2980  VmaDefragmentationStats* pStats,
2981  VmaDefragmentationContext *pContext);
2982 
2988 VkResult vmaDefragmentationEnd(
2989  VmaAllocator allocator,
2990  VmaDefragmentationContext context);
2991 
3032 VkResult vmaDefragment(
3033  VmaAllocator allocator,
3034  VmaAllocation* pAllocations,
3035  size_t allocationCount,
3036  VkBool32* pAllocationsChanged,
3037  const VmaDefragmentationInfo *pDefragmentationInfo,
3038  VmaDefragmentationStats* pDefragmentationStats);
3039 
3052 VkResult vmaBindBufferMemory(
3053  VmaAllocator allocator,
3054  VmaAllocation allocation,
3055  VkBuffer buffer);
3056 
3069 VkResult vmaBindImageMemory(
3070  VmaAllocator allocator,
3071  VmaAllocation allocation,
3072  VkImage image);
3073 
3100 VkResult vmaCreateBuffer(
3101  VmaAllocator allocator,
3102  const VkBufferCreateInfo* pBufferCreateInfo,
3103  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3104  VkBuffer* pBuffer,
3105  VmaAllocation* pAllocation,
3106  VmaAllocationInfo* pAllocationInfo);
3107 
3119 void vmaDestroyBuffer(
3120  VmaAllocator allocator,
3121  VkBuffer buffer,
3122  VmaAllocation allocation);
3123 
3125 VkResult vmaCreateImage(
3126  VmaAllocator allocator,
3127  const VkImageCreateInfo* pImageCreateInfo,
3128  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3129  VkImage* pImage,
3130  VmaAllocation* pAllocation,
3131  VmaAllocationInfo* pAllocationInfo);
3132 
3144 void vmaDestroyImage(
3145  VmaAllocator allocator,
3146  VkImage image,
3147  VmaAllocation allocation);
3148 
3149 #ifdef __cplusplus
3150 }
3151 #endif
3152 
3153 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3154 
3155 // For Visual Studio IntelliSense.
3156 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3157 #define VMA_IMPLEMENTATION
3158 #endif
3159 
3160 #ifdef VMA_IMPLEMENTATION
3161 #undef VMA_IMPLEMENTATION
3162 
3163 #include <cstdint>
3164 #include <cstdlib>
3165 #include <cstring>
3166 
3167 /*******************************************************************************
3168 CONFIGURATION SECTION
3169 
3170 Define some of these macros before each #include of this header or change them
3171 here if you need other than default behavior depending on your environment.
3172 */
3173 
3174 /*
3175 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3176 internally, like:
3177 
3178  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3179 
3180 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3181 VmaAllocatorCreateInfo::pVulkanFunctions.
3182 */
3183 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3184 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3185 #endif
3186 
3187 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3188 //#define VMA_USE_STL_CONTAINERS 1
3189 
3190 /* Set this macro to 1 to make the library including and using STL containers:
3191 std::pair, std::vector, std::list, std::unordered_map.
3192 
3193 Set it to 0 or undefined to make the library using its own implementation of
3194 the containers.
3195 */
3196 #if VMA_USE_STL_CONTAINERS
3197  #define VMA_USE_STL_VECTOR 1
3198  #define VMA_USE_STL_UNORDERED_MAP 1
3199  #define VMA_USE_STL_LIST 1
3200 #endif
3201 
3202 #ifndef VMA_USE_STL_SHARED_MUTEX
3203  // Compiler conforms to C++17.
3204  #if __cplusplus >= 201703L
3205  #define VMA_USE_STL_SHARED_MUTEX 1
3206  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3207  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3208  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3209  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3210  #define VMA_USE_STL_SHARED_MUTEX 1
3211  #else
3212  #define VMA_USE_STL_SHARED_MUTEX 0
3213  #endif
3214 #endif
3215 
3216 /*
3217 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3218 Library has its own container implementation.
3219 */
3220 #if VMA_USE_STL_VECTOR
3221  #include <vector>
3222 #endif
3223 
3224 #if VMA_USE_STL_UNORDERED_MAP
3225  #include <unordered_map>
3226 #endif
3227 
3228 #if VMA_USE_STL_LIST
3229  #include <list>
3230 #endif
3231 
3232 /*
3233 Following headers are used in this CONFIGURATION section only, so feel free to
3234 remove them if not needed.
3235 */
3236 #include <cassert> // for assert
3237 #include <algorithm> // for min, max
3238 #include <mutex>
3239 
3240 #ifndef VMA_NULL
3241  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3242  #define VMA_NULL nullptr
3243 #endif
3244 
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
// Old Android NDK libc lacks C11 aligned_alloc(); emulate it with memalign().
// NOTE(review): memalign() is declared in <malloc.h> on Android — presumably
// pulled in transitively here; verify on the target NDK.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
// Apple and newer Android platforms may not expose C11 aligned_alloc();
// emulate it with POSIX posix_memalign().
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    // posix_memalign failed (invalid alignment or out of memory).
    return VMA_NULL;
}
#endif
3273 
3274 // If your compiler is not compatible with C++11 and definition of
3275 // aligned_alloc() function is missing, uncommenting the following line may help:
3276 
3277 //#include <malloc.h>
3278 
3279 // Normal assert to check for programmer's errors, especially in Debug configuration.
3280 #ifndef VMA_ASSERT
3281  #ifdef _DEBUG
3282  #define VMA_ASSERT(expr) assert(expr)
3283  #else
3284  #define VMA_ASSERT(expr)
3285  #endif
3286 #endif
3287 
3288 // Assert that will be called very often, like inside data structures e.g. operator[].
3289 // Making it non-empty can make program slow.
3290 #ifndef VMA_HEAVY_ASSERT
3291  #ifdef _DEBUG
3292  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3293  #else
3294  #define VMA_HEAVY_ASSERT(expr)
3295  #endif
3296 #endif
3297 
3298 #ifndef VMA_ALIGN_OF
3299  #define VMA_ALIGN_OF(type) (__alignof(type))
3300 #endif
3301 
3302 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3303  #if defined(_WIN32)
3304  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3305  #else
3306  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3307  #endif
3308 #endif
3309 
3310 #ifndef VMA_SYSTEM_FREE
3311  #if defined(_WIN32)
3312  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3313  #else
3314  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3315  #endif
3316 #endif
3317 
3318 #ifndef VMA_MIN
3319  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3320 #endif
3321 
3322 #ifndef VMA_MAX
3323  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3324 #endif
3325 
3326 #ifndef VMA_SWAP
3327  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3328 #endif
3329 
3330 #ifndef VMA_SORT
3331  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3332 #endif
3333 
3334 #ifndef VMA_DEBUG_LOG
3335  #define VMA_DEBUG_LOG(format, ...)
3336  /*
3337  #define VMA_DEBUG_LOG(format, ...) do { \
3338  printf(format, __VA_ARGS__); \
3339  printf("\n"); \
3340  } while(false)
3341  */
3342 #endif
3343 
// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    // Formats a 32-bit unsigned integer as decimal text into outStr
    // (buffer of strLen bytes; snprintf guarantees NUL-termination).
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    // Formats a 64-bit unsigned integer as decimal text into outStr.
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    // Formats a pointer value (implementation-defined "%p" form) into outStr.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif
3359 
#ifndef VMA_MUTEX
    // Plain mutual-exclusion lock wrapping std::mutex. Exposed behind the
    // VMA_MUTEX customization point so users can substitute their own
    // implementation by defining VMA_MUTEX before including this file.
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif
3371 
// Read-write mutex, where "read" is shared access, "write" is exclusive access.
// Like VMA_MUTEX, this is a customization point: define VMA_RW_MUTEX before
// including this file to provide your own implementation.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
    // Use std::shared_mutex from C++17.
    #include <shared_mutex>
    class VmaRWMutex
    {
    public:
        void LockRead() { m_Mutex.lock_shared(); }
        void UnlockRead() { m_Mutex.unlock_shared(); }
        void LockWrite() { m_Mutex.lock(); }
        void UnlockWrite() { m_Mutex.unlock(); }
    private:
        std::shared_mutex m_Mutex;
    };
    #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    // Use SRWLOCK from WinAPI.
    // Minimum supported client = Windows Vista, server = Windows Server 2008.
    class VmaRWMutex
    {
    public:
        VmaRWMutex() { InitializeSRWLock(&m_Lock); }
        void LockRead() { AcquireSRWLockShared(&m_Lock); }
        void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
        void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
        void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    private:
        // SRWLOCK needs no explicit destruction.
        SRWLOCK m_Lock;
    };
    #define VMA_RW_MUTEX VmaRWMutex
    #else
    // Less efficient fallback: Use normal mutex.
    // Readers exclude each other here, so shared access degrades to exclusive.
    class VmaRWMutex
    {
    public:
        void LockRead() { m_Mutex.Lock(); }
        void UnlockRead() { m_Mutex.Unlock(); }
        void LockWrite() { m_Mutex.Lock(); }
        void UnlockWrite() { m_Mutex.Unlock(); }
    private:
        VMA_MUTEX m_Mutex;
    };
    #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX
3418 
3419 /*
3420 If providing your own implementation, you need to implement a subset of std::atomic:
3421 
3422 - Constructor(uint32_t desired)
3423 - uint32_t load() const
3424 - void store(uint32_t desired)
3425 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3426 */
3427 #ifndef VMA_ATOMIC_UINT32
3428  #include <atomic>
3429  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3430 #endif
3431 
3432 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3433 
3437  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3438 #endif
3439 
3440 #ifndef VMA_DEBUG_ALIGNMENT
3441 
3445  #define VMA_DEBUG_ALIGNMENT (1)
3446 #endif
3447 
3448 #ifndef VMA_DEBUG_MARGIN
3449 
3453  #define VMA_DEBUG_MARGIN (0)
3454 #endif
3455 
3456 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3457 
3461  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3462 #endif
3463 
3464 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3465 
3470  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3471 #endif
3472 
3473 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3474 
3478  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3479 #endif
3480 
3481 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3482 
3486  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3487 #endif
3488 
3489 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3490  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3492 #endif
3493 
3494 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3495  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3497 #endif
3498 
3499 #ifndef VMA_CLASS_NO_COPY
3500  #define VMA_CLASS_NO_COPY(className) \
3501  private: \
3502  className(const className&) = delete; \
3503  className& operator=(const className&) = delete;
3504 #endif
3505 
3506 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3507 
3508 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3509 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3510 
3511 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3512 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3513 
3514 /*******************************************************************************
3515 END OF CONFIGURATION
3516 */
3517 
3518 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3519 
3520 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3521  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3522 
3523 // Returns number of bits set to 1 in (v).
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Straightforward per-bit scan: at most 32 iterations, and it stops as
    // soon as no set bits remain.
    uint32_t count = 0;
    while(v != 0)
    {
        count += v & 1u;
        v >>= 1;
    }
    return count;
}
3533 
// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Bump to the next multiple with integer arithmetic, then truncate.
    const T blocks = (val + align - 1) / align;
    return blocks * align;
}
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    // Integer division truncates toward zero, discarding the remainder.
    return val / align * align;
}
3548 
// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    // Adding half of the divisor before the truncating division rounds the
    // quotient to the nearest integer (for nonnegative operands).
    const T halfDivisor = y / (T)2;
    return (x + halfDivisor) / y;
}
3555 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set, so clearing its lowest set bit
    // yields zero. (Zero also passes, as documented above.)
    const T withoutLowestBit = x & (x - 1);
    return withoutLowestBit == 0;
}
3566 
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Classic bit-smearing: after decrementing, propagate the highest set bit
    // into every lower position (shifts 1, 2, 4, 8, 16), then add 1.
    --v;
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    ++v;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    // 64-bit overload of the bit-smearing trick above
    // (shifts 1, 2, 4, 8, 16, 32).
    --v;
    for(uint32_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    ++v;
    return v;
}
3591 
// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Smear the highest set bit downward so v becomes 2^k - 1 (or stays 0),
    // then XOR with its half to leave only the highest bit.
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    v ^= v >> 1;
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // 64-bit overload: smear downward (shifts 1..32), then isolate the MSB.
    for(uint32_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    v ^= v >> 1;
    return v;
}
3614 
3615 static inline bool VmaStrIsEmpty(const char* pStr)
3616 {
3617  return pStr == VMA_NULL || *pStr == '\0';
3618 }
3619 
3620 #if VMA_STATS_STRING_ENABLED
3621 
// Maps a pool algorithm bit value to a human-readable name for the JSON
// stats string. 0 means the default (generic) allocation algorithm.
static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    // NOTE(review): the two case labels guarding the "Linear" and "Buddy"
    // returns appear to be elided in this copy of the file — verify against
    // the upstream source before relying on this function.
    return "Linear";
    return "Buddy";
    case 0:
    return "Default";
    default:
    VMA_ASSERT(0);
    return "";
    }
}
3637 
3638 #endif // #if VMA_STATS_STRING_ENABLED
3639 
3640 #ifndef VMA_SORT
3641 
// Partition step of VmaQuickSort (Lomuto-style scheme): uses the last element
// of [beg, end) as the pivot, moves every element that compares less than the
// pivot in front of it, then swaps the pivot into its final sorted position.
// Returns an iterator to the pivot's final position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    // Pivot is the last element of the range.
    Iterator centerValue = end; --centerValue;
    // Boundary between the "less than pivot" prefix and the rest.
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Place the pivot between the two partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
3664 
// Recursive quicksort over iterator range [beg, end). Used as the fallback
// when the user has not defined VMA_SORT. cmp(a, b) returns true when a < b.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        // Sort both sides of the pivot; the pivot itself is already in place.
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
3675 
3676 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3677 
3678 #endif // #ifndef VMA_SORT
3679 
3680 /*
3681 Returns true if two memory blocks occupy overlapping pages.
3682 ResourceA must be in less memory offset than ResourceB.
3683 
3684 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3685 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3686 */
3687 static inline bool VmaBlocksOnSamePage(
3688  VkDeviceSize resourceAOffset,
3689  VkDeviceSize resourceASize,
3690  VkDeviceSize resourceBOffset,
3691  VkDeviceSize pageSize)
3692 {
3693  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3694  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3695  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3696  VkDeviceSize resourceBStart = resourceBOffset;
3697  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3698  return resourceAEndPage == resourceBStartPage;
3699 }
3700 
// Kind of resource occupying a suballocation inside a memory block. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    // Unused range of the block.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Resource kind not known; treated conservatively as conflicting.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image with unknown tiling; treated conservatively.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    // Forces a 32-bit underlying type for the enum.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
3711 
3712 /*
3713 Returns true if given suballocation types could conflict and must respect
3714 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3715 or linear image and another one is optimal image. If type is unknown, behave
3716 conservatively.
3717 */
3718 static inline bool VmaIsBufferImageGranularityConflict(
3719  VmaSuballocationType suballocType1,
3720  VmaSuballocationType suballocType2)
3721 {
3722  if(suballocType1 > suballocType2)
3723  {
3724  VMA_SWAP(suballocType1, suballocType2);
3725  }
3726 
3727  switch(suballocType1)
3728  {
3729  case VMA_SUBALLOCATION_TYPE_FREE:
3730  return false;
3731  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3732  return true;
3733  case VMA_SUBALLOCATION_TYPE_BUFFER:
3734  return
3735  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3736  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3737  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3738  return
3739  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3740  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3741  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3742  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3743  return
3744  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3745  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3746  return false;
3747  default:
3748  VMA_ASSERT(0);
3749  return true;
3750  }
3751 }
3752 
3753 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3754 {
3755 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3756  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3757  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3758  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3759  {
3760  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3761  }
3762 #else
3763  // no-op
3764 #endif
3765 }
3766 
3767 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3768 {
3769 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3770  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3771  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3772  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3773  {
3774  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3775  {
3776  return false;
3777  }
3778  }
3779 #endif
3780  return true;
3781 }
3782 
3783 /*
3784 Fills structure with parameters of an example buffer to be used for transfers
3785 during GPU memory defragmentation.
3786 */
3787 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3788 {
3789  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3790  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
3791  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3792  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3793 }
3794 
3795 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3796 struct VmaMutexLock
3797 {
3798  VMA_CLASS_NO_COPY(VmaMutexLock)
3799 public:
3800  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3801  m_pMutex(useMutex ? &mutex : VMA_NULL)
3802  { if(m_pMutex) { m_pMutex->Lock(); } }
3803  ~VmaMutexLock()
3804  { if(m_pMutex) { m_pMutex->Unlock(); } }
3805 private:
3806  VMA_MUTEX* m_pMutex;
3807 };
3808 
3809 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3810 struct VmaMutexLockRead
3811 {
3812  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3813 public:
3814  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3815  m_pMutex(useMutex ? &mutex : VMA_NULL)
3816  { if(m_pMutex) { m_pMutex->LockRead(); } }
3817  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3818 private:
3819  VMA_RW_MUTEX* m_pMutex;
3820 };
3821 
3822 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3823 struct VmaMutexLockWrite
3824 {
3825  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3826 public:
3827  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3828  m_pMutex(useMutex ? &mutex : VMA_NULL)
3829  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3830  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3831 private:
3832  VMA_RW_MUTEX* m_pMutex;
3833 };
3834 
3835 #if VMA_DEBUG_GLOBAL_MUTEX
3836  static VMA_MUTEX gDebugGlobalMutex;
3837  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3838 #else
3839  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3840 #endif
3841 
3842 // Minimum size of a free suballocation to register it in the free suballocation collection.
3843 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3844 
/*
Performs binary search and returns iterator to the first element that is
greater or equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted; 'end' if no such
element exists.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        // Overflow-safe midpoint; same value as (lo + hi) / 2 here.
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
            lo = mid + 1;
        else
            hi = mid;
    }
    return beg + lo;
}
3872 
3873 template<typename CmpLess, typename IterT, typename KeyT>
3874 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
3875 {
3876  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3877  beg, end, value, cmp);
3878  if(it == end ||
3879  (!cmp(*it, value) && !cmp(value, *it)))
3880  {
3881  return it;
3882  }
3883  return end;
3884 }
3885 
3886 /*
3887 Returns true if all pointers in the array are not-null and unique.
3888 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3889 T must be pointer type, e.g. VmaAllocation, VmaPool.
3890 */
3891 template<typename T>
3892 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3893 {
3894  for(uint32_t i = 0; i < count; ++i)
3895  {
3896  const T iPtr = arr[i];
3897  if(iPtr == VMA_NULL)
3898  {
3899  return false;
3900  }
3901  for(uint32_t j = i + 1; j < count; ++j)
3902  {
3903  if(iPtr == arr[j])
3904  {
3905  return false;
3906  }
3907  }
3908  }
3909  return true;
3910 }
3911 
3913 // Memory allocation
3914 
3915 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3916 {
3917  if((pAllocationCallbacks != VMA_NULL) &&
3918  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3919  {
3920  return (*pAllocationCallbacks->pfnAllocation)(
3921  pAllocationCallbacks->pUserData,
3922  size,
3923  alignment,
3924  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3925  }
3926  else
3927  {
3928  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3929  }
3930 }
3931 
3932 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3933 {
3934  if((pAllocationCallbacks != VMA_NULL) &&
3935  (pAllocationCallbacks->pfnFree != VMA_NULL))
3936  {
3937  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3938  }
3939  else
3940  {
3941  VMA_SYSTEM_FREE(ptr);
3942  }
3943 }
3944 
3945 template<typename T>
3946 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3947 {
3948  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3949 }
3950 
3951 template<typename T>
3952 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3953 {
3954  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3955 }
3956 
// Placement-new helpers: allocate raw storage through the VMA allocation
// callbacks, then construct the object(s) in place.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3960 
// Destroys a single object created with vma_new and frees its storage.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
3967 
3968 template<typename T>
3969 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3970 {
3971  if(ptr != VMA_NULL)
3972  {
3973  for(size_t i = count; i--; )
3974  {
3975  ptr[i].~T();
3976  }
3977  VmaFree(pAllocationCallbacks, ptr);
3978  }
3979 }
3980 
3981 // STL-compatible allocator.
3982 template<typename T>
3983 class VmaStlAllocator
3984 {
3985 public:
3986  const VkAllocationCallbacks* const m_pCallbacks;
3987  typedef T value_type;
3988 
3989  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3990  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3991 
3992  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3993  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3994 
3995  template<typename U>
3996  bool operator==(const VmaStlAllocator<U>& rhs) const
3997  {
3998  return m_pCallbacks == rhs.m_pCallbacks;
3999  }
4000  template<typename U>
4001  bool operator!=(const VmaStlAllocator<U>& rhs) const
4002  {
4003  return m_pCallbacks != rhs.m_pCallbacks;
4004  }
4005 
4006  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4007 };
4008 
4009 #if VMA_USE_STL_VECTOR
4010 
4011 #define VmaVector std::vector
4012 
// Inserts 'item' into 'vec' so that it ends up at position 'index'.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.insert(pos, item);
}
4018 
// Erases the element at position 'index' from 'vec'.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.erase(pos);
}
4024 
4025 #else // #if VMA_USE_STL_VECTOR
4026 
4027 /* Class with interface compatible with subset of std::vector.
4028 T must be POD because constructors and destructors are not called and memcpy is
4029 used for these objects. */
4030 template<typename T, typename AllocatorT>
4031 class VmaVector
4032 {
4033 public:
4034  typedef T value_type;
4035 
4036  VmaVector(const AllocatorT& allocator) :
4037  m_Allocator(allocator),
4038  m_pArray(VMA_NULL),
4039  m_Count(0),
4040  m_Capacity(0)
4041  {
4042  }
4043 
4044  VmaVector(size_t count, const AllocatorT& allocator) :
4045  m_Allocator(allocator),
4046  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4047  m_Count(count),
4048  m_Capacity(count)
4049  {
4050  }
4051 
4052  VmaVector(const VmaVector<T, AllocatorT>& src) :
4053  m_Allocator(src.m_Allocator),
4054  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4055  m_Count(src.m_Count),
4056  m_Capacity(src.m_Count)
4057  {
4058  if(m_Count != 0)
4059  {
4060  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4061  }
4062  }
4063 
4064  ~VmaVector()
4065  {
4066  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4067  }
4068 
4069  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4070  {
4071  if(&rhs != this)
4072  {
4073  resize(rhs.m_Count);
4074  if(m_Count != 0)
4075  {
4076  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4077  }
4078  }
4079  return *this;
4080  }
4081 
4082  bool empty() const { return m_Count == 0; }
4083  size_t size() const { return m_Count; }
4084  T* data() { return m_pArray; }
4085  const T* data() const { return m_pArray; }
4086 
4087  T& operator[](size_t index)
4088  {
4089  VMA_HEAVY_ASSERT(index < m_Count);
4090  return m_pArray[index];
4091  }
4092  const T& operator[](size_t index) const
4093  {
4094  VMA_HEAVY_ASSERT(index < m_Count);
4095  return m_pArray[index];
4096  }
4097 
4098  T& front()
4099  {
4100  VMA_HEAVY_ASSERT(m_Count > 0);
4101  return m_pArray[0];
4102  }
4103  const T& front() const
4104  {
4105  VMA_HEAVY_ASSERT(m_Count > 0);
4106  return m_pArray[0];
4107  }
4108  T& back()
4109  {
4110  VMA_HEAVY_ASSERT(m_Count > 0);
4111  return m_pArray[m_Count - 1];
4112  }
4113  const T& back() const
4114  {
4115  VMA_HEAVY_ASSERT(m_Count > 0);
4116  return m_pArray[m_Count - 1];
4117  }
4118 
4119  void reserve(size_t newCapacity, bool freeMemory = false)
4120  {
4121  newCapacity = VMA_MAX(newCapacity, m_Count);
4122 
4123  if((newCapacity < m_Capacity) && !freeMemory)
4124  {
4125  newCapacity = m_Capacity;
4126  }
4127 
4128  if(newCapacity != m_Capacity)
4129  {
4130  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4131  if(m_Count != 0)
4132  {
4133  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4134  }
4135  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4136  m_Capacity = newCapacity;
4137  m_pArray = newArray;
4138  }
4139  }
4140 
4141  void resize(size_t newCount, bool freeMemory = false)
4142  {
4143  size_t newCapacity = m_Capacity;
4144  if(newCount > m_Capacity)
4145  {
4146  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4147  }
4148  else if(freeMemory)
4149  {
4150  newCapacity = newCount;
4151  }
4152 
4153  if(newCapacity != m_Capacity)
4154  {
4155  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4156  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4157  if(elementsToCopy != 0)
4158  {
4159  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4160  }
4161  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4162  m_Capacity = newCapacity;
4163  m_pArray = newArray;
4164  }
4165 
4166  m_Count = newCount;
4167  }
4168 
4169  void clear(bool freeMemory = false)
4170  {
4171  resize(0, freeMemory);
4172  }
4173 
4174  void insert(size_t index, const T& src)
4175  {
4176  VMA_HEAVY_ASSERT(index <= m_Count);
4177  const size_t oldCount = size();
4178  resize(oldCount + 1);
4179  if(index < oldCount)
4180  {
4181  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4182  }
4183  m_pArray[index] = src;
4184  }
4185 
4186  void remove(size_t index)
4187  {
4188  VMA_HEAVY_ASSERT(index < m_Count);
4189  const size_t oldCount = size();
4190  if(index < oldCount - 1)
4191  {
4192  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4193  }
4194  resize(oldCount - 1);
4195  }
4196 
4197  void push_back(const T& src)
4198  {
4199  const size_t newIndex = size();
4200  resize(newIndex + 1);
4201  m_pArray[newIndex] = src;
4202  }
4203 
4204  void pop_back()
4205  {
4206  VMA_HEAVY_ASSERT(m_Count > 0);
4207  resize(size() - 1);
4208  }
4209 
4210  void push_front(const T& src)
4211  {
4212  insert(0, src);
4213  }
4214 
4215  void pop_front()
4216  {
4217  VMA_HEAVY_ASSERT(m_Count > 0);
4218  remove(0);
4219  }
4220 
4221  typedef T* iterator;
4222 
4223  iterator begin() { return m_pArray; }
4224  iterator end() { return m_pArray + m_Count; }
4225 
4226 private:
4227  AllocatorT m_Allocator;
4228  T* m_pArray;
4229  size_t m_Count;
4230  size_t m_Capacity;
4231 };
4232 
// Inserts 'item' into 'vec' at position 'index' (VmaVector counterpart of the
// std::vector overload above).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
4238 
// Removes the element at position 'index' from 'vec' (VmaVector counterpart of
// the std::vector overload above).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4244 
4245 #endif // #if VMA_USE_STL_VECTOR
4246 
// Inserts 'value' into 'vector', which must already be sorted according to
// CmpLess, keeping it sorted. Returns the index of the inserted element.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBeg = vector.data();
    typename VectorT::value_type* const pEnd = pBeg + vector.size();
    const size_t insertIndex = (size_t)(VmaBinaryFindFirstNotLess(pBeg, pEnd, value, CmpLess()) - pBeg);
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
4258 
// Removes the element equivalent to 'value' from sorted 'vector', if present.
// Returns true when an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Equivalent means not less in either direction.
    const bool found =
        (it != vector.end()) &&
        !comparator(*it, value) &&
        !comparator(value, *it);
    if(found)
    {
        VmaVectorRemove(vector, (size_t)(it - vector.begin()));
    }
    return found;
}
4276 
4278 // class VmaPoolAllocator
4279 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    // Each slot either holds a live T, or - while free - the index of the
    // next free slot in the same block (intrusive free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of items plus the head of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity; // Capacity of the first block; later blocks grow by 3/2.
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4316 
// Initializes an empty pool allocator. Blocks are created lazily on first Alloc.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // Capacity of 1 would make the 3/2 growth in CreateNewBlock degenerate.
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
4325 
// Releases all blocks. Any outstanding item pointers become invalid.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
4331 
4332 template<typename T>
4333 void VmaPoolAllocator<T>::Clear()
4334 {
4335  for(size_t i = m_ItemBlocks.size(); i--; )
4336  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4337  m_ItemBlocks.clear();
4338 }
4339 
// Returns a pointer to an uninitialized item. Reuses a free slot from an
// existing block when possible; otherwise allocates a new, larger block.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
4361 
// Returns 'ptr' (previously obtained from Alloc) to its owning block's free
// list. Asserts if the pointer does not belong to any block.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy is used instead of a pointer cast to avoid aliasing issues.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto the block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
4385 
// Allocates a new item block (3/2 the capacity of the previous one, or
// m_FirstBlockCapacity for the first), appends it to m_ItemBlocks, and links
// all its slots into a free list.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: its pItems points at the same
    // array as the element just stored in m_ItemBlocks.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
4405 
4407 // class VmaRawList, VmaList
4408 
4409 #if VMA_USE_STL_LIST
4410 
4411 #define VmaList std::list
4412 
4413 #else // #if VMA_USE_STL_LIST
4414 
// Node of VmaRawList: intrusive doubly-linked-list pointers plus the payload.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
4422 
// Doubly linked list.
// Low-level list of VmaListItem<T> nodes whose memory comes from an internal
// VmaPoolAllocator. Serves as the backing store for VmaList below.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // The overloads without 'value' leave the new item's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool all items are allocated from.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
4467 
// Creates an empty list. Items are pool-allocated in blocks starting at 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
4477 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's destructor releases whole blocks at once.
}
4484 
4485 template<typename T>
4486 void VmaRawList<T>::Clear()
4487 {
4488  if(IsEmpty() == false)
4489  {
4490  ItemType* pItem = m_pBack;
4491  while(pItem != VMA_NULL)
4492  {
4493  ItemType* const pPrevItem = pItem->pPrev;
4494  m_ItemAllocator.Free(pItem);
4495  pItem = pPrevItem;
4496  }
4497  m_pFront = VMA_NULL;
4498  m_pBack = VMA_NULL;
4499  m_Count = 0;
4500  }
4501 }
4502 
// Allocates a new item and appends it at the back of the list.
// The item's Value is left uninitialized.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
4524 
// Allocates a new item and prepends it at the front of the list.
// The item's Value is left uninitialized.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
4546 
4547 template<typename T>
4548 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4549 {
4550  ItemType* const pNewItem = PushBack();
4551  pNewItem->Value = value;
4552  return pNewItem;
4553 }
4554 
4555 template<typename T>
4556 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4557 {
4558  ItemType* const pNewItem = PushFront();
4559  pNewItem->Value = value;
4560  return pNewItem;
4561 }
4562 
// Removes and frees the last item. The list must not be empty.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
    // NOTE(review): when the last item is popped, m_pFront is not reset and
    // points at freed memory; callers appear to guard with IsEmpty() - confirm.
}
4577 
// Removes and frees the first item. The list must not be empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
    // NOTE(review): when the last item is popped, m_pBack is not reset and
    // points at freed memory; callers appear to guard with IsEmpty() - confirm.
}
4592 
// Unlinks 'pItem' from the list and frees it. 'pItem' must be a live item of
// this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the forward link of the predecessor (or the list front).
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the backward link of the successor (or the list back).
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
4622 
// Inserts a new uninitialized item before 'pItem'.
// When 'pItem' is null, this is equivalent to PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the first item: new item becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
4648 
// Inserts a new uninitialized item after 'pItem'.
// When 'pItem' is null, this is equivalent to PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the last item: new item becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
4674 
4675 template<typename T>
4676 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4677 {
4678  ItemType* const newItem = InsertBefore(pItem);
4679  newItem->Value = value;
4680  return newItem;
4681 }
4682 
4683 template<typename T>
4684 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4685 {
4686  ItemType* const newItem = InsertAfter(pItem);
4687  newItem->Value = value;
4688  return newItem;
4689 }
4690 
// std::list-like wrapper over VmaRawList that adds bidirectional iterators.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() yields an iterator to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator. A null m_pItem represents cend().
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from a mutable iterator.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() yields an iterator to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4875 
4876 #endif // #if VMA_USE_STL_LIST
4877 
4879 // class VmaMap
4880 
4881 // Unused in this version.
4882 #if 0
4883 
4884 #if VMA_USE_STL_UNORDERED_MAP
4885 
4886 #define VmaPair std::pair
4887 
4888 #define VMA_MAP_TYPE(KeyT, ValueT) \
4889  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4890 
4891 #else // #if VMA_USE_STL_UNORDERED_MAP
4892 
// Minimal std::pair replacement, suitable for POD-style storage in VmaVector.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4902 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a vector of pairs kept sorted by key (see insert/find below).
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4925 
4926 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4927 
// Orders pairs (or a pair against a bare key) by the 'first' member only.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    // Heterogeneous comparison used when searching by key.
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4940 
// Inserts 'pair', keeping the underlying vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
4951 
// Binary-searches for 'key'. Returns iterator to the matching pair, or end()
// when the key is not present.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
4969 
// Removes the element 'it' points to. 'it' must be a valid, dereferenceable
// iterator of this map.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
4975 
4976 #endif // #if VMA_USE_STL_UNORDERED_MAP
4977 
4978 #endif // #if 0
4979 
4981 
class VmaDeviceMemoryBlock;

// Kind of cache maintenance operation to perform on a range of mapped memory.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4985 
// Internal representation of a single allocation: either a region inside a
// VmaDeviceMemoryBlock, or a dedicated VkDeviceMemory object (see union below).
struct VmaAllocation_T
{
private:
    // High bit of m_MapCount: set when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData is an owned string copy rather than an opaque pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Not initialized yet (state right after Ctor()).
        ALLOCATION_TYPE_BLOCK,     // Suballocation out of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Has its own private VkDeviceMemory.
    };

    /*
    This struct cannot have constructor or destructor. It must be POD because it is
    allocated using VmaPoolAllocator.
    */

    // POD substitute for a constructor. Must be called right after the object is
    // obtained from the pool allocator, before any other member function.
    void Ctor(uint32_t currentFrameIndex, bool userDataString)
    {
        m_Alignment = 1;
        m_Size = 0;
        m_pUserData = VMA_NULL;
        m_LastUseFrameIndex = currentFrameIndex;
        m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
        m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
        m_MapCount = 0;
        m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;

#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    // POD substitute for a destructor. Only validates that the allocation was
    // properly unmapped and that any owned user-data string has been freed.
    void Dtor()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation of `block` at `offset`.
    // Must be called on a freshly Ctor()-ed object (type NONE).
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an already-lost block allocation:
    // null block, zero offset. Requires m_LastUseFrameIndex == VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);
    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    // Initializes this object as owning its own VkDeviceMemory (`hMemory`).
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; `expected` is updated on failure
    // (standard compare_exchange_weak semantics, so callers may loop).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    // Valid only for ALLOCATION_TYPE_DEDICATED (asserted).
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at its "empty" sentinel, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records buffer/image usage flags for statistics. May be set only once (asserted).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
5205 
5206 /*
5207 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5208 allocated memory block or free.
5209 */
struct VmaSuballocation
{
    VkDeviceSize offset;       // Offset of this region from the start of the block.
    VkDeviceSize size;         // Size of this region in bytes.
    VmaAllocation hAllocation; // NOTE(review): appears to be null when the region is free - confirm against metadata implementations.
    VmaSuballocationType type; // Kind of resource occupying the region, or FREE.
};
5217 
5218 // Comparator for offsets.
5219 struct VmaSuballocationOffsetLess
5220 {
5221  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5222  {
5223  return lhs.offset < rhs.offset;
5224  }
5225 };
5226 struct VmaSuballocationOffsetGreater
5227 {
5228  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5229  {
5230  return lhs.offset > rhs.offset;
5231  }
5232 };
5233 
5234 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5235 
5236 // Cost of one additional allocation lost, as equivalent in bytes.
5237 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5238 
// Distinguishes how/where an allocation request is to be satisfied within a block.
enum class VmaAllocationRequestType
{
    Normal,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};
5247 
5248 /*
5249 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5250 
5251 If canMakeOtherLost was false:
5252 - item points to a FREE suballocation.
5253 - itemsToMakeLostCount is 0.
5254 
5255 If canMakeOtherLost was true:
5256 - item points to first of sequence of suballocations, which are either FREE,
5257  or point to VmaAllocations that can become lost.
5258 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5259  the requested allocation to succeed.
5260 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;      // Proposed offset of the new allocation within the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;
    VmaAllocationRequestType type;

    // Heuristic cost of fulfilling this request: bytes of allocations to be made
    // lost, plus a fixed penalty per lost allocation (VMA_LOST_ALLOCATION_COST).
    // Lower cost is preferable when choosing between candidate requests.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
5276 
5277 /*
5278 Data structure used for bookkeeping of allocations and unused ranges of memory
5279 in a single VkDeviceMemory block.
5280 */
// Abstract base for the per-block bookkeeping strategies
// (Generic, Linear, Buddy - see subclasses below).
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called after construction with the block's total size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations referenced by pAllocationRequest
    // (prepared earlier by CreateAllocationRequest with canMakeOtherLost).
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every allocation in this block that is eligible to become lost;
    // returns the number of allocations made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Checks block data for corruption. May return VK_ERROR_FEATURE_NOT_PRESENT
    // when the strategy doesn't support it (see VmaBlockMetadata_Buddy).
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Base implementation: unsupported.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for subclasses' PrintDetailedMap implementations: emit the shared
    // JSON envelope, one allocation entry, one unused-range entry, and the footer.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
5365 
// Validation helper: if cond is false, asserts and makes the enclosing function
// return false. do { } while(false) makes it behave as a single statement.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
5370 
// Default metadata strategy: maintains a list of all suballocations sorted by
// offset, plus an auxiliary vector of large-enough free suballocations sorted
// by size for best-fit searches.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Live allocations = total suballocation items minus free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    // Number of FREE items in m_Suballocations.
    uint32_t m_FreeCount;
    // Total bytes currently free in this block.
    VkDeviceSize m_SumFreeSize;
    // All suballocations (free and taken); ordered by offset.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
5472 
5473 /*
5474 Allocations and their references in internal data structure look like this:
5475 
5476 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5477 
5478  0 +-------+
5479  | |
5480  | |
5481  | |
5482  +-------+
5483  | Alloc | 1st[m_1stNullItemsBeginCount]
5484  +-------+
5485  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5486  +-------+
5487  | ... |
5488  +-------+
5489  | Alloc | 1st[1st.size() - 1]
5490  +-------+
5491  | |
5492  | |
5493  | |
5494 GetSize() +-------+
5495 
5496 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5497 
5498  0 +-------+
5499  | Alloc | 2nd[0]
5500  +-------+
5501  | Alloc | 2nd[1]
5502  +-------+
5503  | ... |
5504  +-------+
5505  | Alloc | 2nd[2nd.size() - 1]
5506  +-------+
5507  | |
5508  | |
5509  | |
5510  +-------+
5511  | Alloc | 1st[m_1stNullItemsBeginCount]
5512  +-------+
5513  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5514  +-------+
5515  | ... |
5516  +-------+
5517  | Alloc | 1st[1st.size() - 1]
5518  +-------+
5519  | |
5520 GetSize() +-------+
5521 
5522 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5523 
5524  0 +-------+
5525  | |
5526  | |
5527  | |
5528  +-------+
5529  | Alloc | 1st[m_1stNullItemsBeginCount]
5530  +-------+
5531  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5532  +-------+
5533  | ... |
5534  +-------+
5535  | Alloc | 1st[1st.size() - 1]
5536  +-------+
5537  | |
5538  | |
5539  | |
5540  +-------+
5541  | Alloc | 2nd[2nd.size() - 1]
5542  +-------+
5543  | ... |
5544  +-------+
5545  | Alloc | 2nd[1]
5546  +-------+
5547  | Alloc | 2nd[0]
5548 GetSize() +-------+
5549 
5550 */
// Metadata strategy for linear allocation: supports stack, double stack and
// ring-buffer usage patterns via two ping-pong suballocation vectors
// (see SECOND_VECTOR_MODE and the diagram above this class).
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving which physical vector currently plays the 1st/2nd role.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    // Implementation halves of CreateAllocationRequest, split by growth direction.
    bool CreateAllocationRequest_LowerAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
5669 
5670 /*
5671 - GetSize() is the original size of allocated memory block.
5672 - m_UsableSize is this size aligned down to a power of two.
5673  All allocations and calculations happen relative to m_UsableSize.
5674 - GetUnusableSize() is the difference between them.
5676  It is reported as separate, unused range, not available for allocations.
5676 
5677 Node at level 0 has size = m_UsableSize.
5678 Each next level contains nodes with size 2 times smaller than current level.
5679 m_LevelCount is the maximum number of levels to use in the current object.
5680 */
// Metadata strategy implementing the buddy allocator algorithm over a binary
// tree of power-of-two nodes (see the description above this class).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail (size rounded down to power of two) counts as free here.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty iff the root node is a single undivided free node.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Both public free entry points delegate to the private FreeAtOffset overload.
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators used while recursively validating the tree in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // A node of the buddy tree; payload depends on `type` (see union).
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                // Intrusive links within the per-level free list.
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                // Right child is reachable via leftChild->buddy.
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // One doubly-linked free list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Each level halves the node size: level 0 is the whole usable block.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
5816 
5817 /*
5818 Represents a single block of device memory (`VkDeviceMemory`) with all the
5819 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5820 
5821 Thread-safety: This class must be externally synchronized.
5822 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping strategy for this block's suballocations (owned).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // `count` is the number of map references to add/release (see m_MapCount).
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify guard magic values surrounding an allocation, for corruption detection.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
5891 
struct VmaPointerLess
{
    // Orders raw pointers by address, giving a strict weak ordering for
    // pointer-keyed sorted containers.
    bool operator()(const void* a, const void* b) const
    {
        return a < b;
    }
};
5899 
// Describes one planned move of an allocation between blocks (or within a block)
// during defragmentation.
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;  // Index of the source block.
    size_t dstBlockIndex;  // Index of the destination block.
    VkDeviceSize srcOffset; // Offset of the data within the source block.
    VkDeviceSize dstOffset; // Offset of the data within the destination block.
    VkDeviceSize size;      // Number of bytes to move.
};
5908 
5909 class VmaDefragmentationAlgorithm;
5910 
5911 /*
5912 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5913 Vulkan memory type.
5914 
5915 Synchronized internally with a mutex.
5916 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    // hParentPool is VK_NULL_HANDLE when this vector backs a default (per-memory-type)
    // pool rather than a user-created custom pool.
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Eagerly creates m_MinBlockCount blocks so they exist before first allocation.
    VkResult CreateMinBlocks();

    // Accessors for the immutable configuration of this vector.
    VmaPool GetParentPool() const { return m_hParentPool; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    // Fills pStats with aggregate statistics over all blocks of this vector.
    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates allocationCount allocations of the given size/alignment,
    // writing the resulting handles to pAllocations.
    VkResult Allocate(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    // Validates magic markers around allocations; VK_SUCCESS when intact.
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related mutable state (see methods above for locking rules).
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    // Monotonic counter used to assign unique ids to newly created blocks.
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation (one "page" of a multi-allocation request).
    VkResult AllocatePage(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    // Creates a new VkDeviceMemory block of blockSize and appends it to m_Blocks.
    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
6062 
// Implementation of the public VmaPool handle: a custom memory pool, which is
// essentially a VmaBlockVector configured from VmaPoolCreateInfo plus a
// numeric id used for bookkeeping by the allocator.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned exactly once; asserts if already set.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
6085 
6086 /*
6087 Performs defragmentation:
6088 
6089 - Updates `pBlockVector->m_pMetadata`.
6090 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6091 - Does not move actual data, only returns requested moves as `moves`.
6092 */
// Abstract base class for defragmentation strategies operating on one
// VmaBlockVector. Concrete algorithms (Generic, Fast) collect candidate
// allocations via AddAllocation()/AddAll() and then produce a list of
// requested moves in Defragment() without touching the actual memory contents.
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers a single allocation as movable. *pChanged (optional) is set by
    // the algorithm if the allocation ends up being moved.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks all allocations of the block vector as movable.
    virtual void AddAll() = 0;

    // Appends requested relocations to `moves`, respecting the given byte and
    // allocation-count budgets.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Pairs an allocation handle with the caller's optional "was moved" flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
6143 
// General-purpose defragmentation algorithm: groups allocations per block,
// sorts blocks and allocations, and iteratively moves allocations into
// better-placed blocks in rounds (see DefragmentRound).
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    // Running totals across all completed rounds.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Orders allocations largest-first, so big allocations are placed early.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Orders allocations by descending offset within their block.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block working state for the algorithm.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // A block has non-movable allocations if the caller registered fewer
        // allocations for defragmentation than the block actually contains.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Heterogeneous/homogeneous comparison by underlying block pointer,
    // for binary search of a block in a sorted BlockInfoVector.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of move planning; called repeatedly by Defragment()
    // until no further progress or the budgets are exhausted.
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    // Heuristic filter: rejects moves that would not improve packing.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
6270 
// Fast single-pass defragmentation algorithm. Instead of per-allocation
// bookkeeping it works directly on block metadata (PreprocessMetadata /
// PostprocessMetadata) and tracks a small fixed-size set of free regions
// (FreeSpaceDatabase) to pack allocations forward.
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    // This algorithm only counts registered allocations; individual handles
    // and pChanged flags are not tracked.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    // Fixed-capacity cache of up to MAX_COUNT free regions left behind while
    // packing. Register() records a region worth remembering; Fetch() finds a
    // region an allocation fits into and consumes (or shrinks) it.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // Mark all slots invalid (blockInfoIndex == SIZE_MAX).
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            // Regions below the threshold are not worth tracking.
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                // Only evict an existing entry that is smaller than the new region.
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Returns true and outputs a destination (block index, aligned offset)
        // if some tracked region can hold `size` bytes at `alignment`.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        // Prefer the region that leaves the most space after the
                        // allocation, so the remainder stays useful.
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
6418 
// Per-block state of an in-progress defragmentation: flags plus an optional
// staging VkBuffer bound to the block for GPU copies.
struct VmaBlockDefragmentationContext
{
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;    // Combination of BLOCK_FLAG values.
    VkBuffer hBuffer;  // Buffer created over the block for GPU defragmentation, or VK_NULL_HANDLE.
};
6428 
// Defragmentation state for a single VmaBlockVector: owns the chosen
// VmaDefragmentationAlgorithm and the per-block contexts, and buffers
// the allocations registered before Begin() is called.
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    VkResult res;       // Result of the block vector's defragmentation.
    bool mutexLocked;   // True while the block vector's mutex is held by this context.
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    // Queues an allocation to be handed to the algorithm in Begin().
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    // Creates m_pAlgorithm and feeds it the queued allocations.
    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
6472 
// Implementation of the public VmaDefragmentationContext handle. Aggregates
// one VmaBlockVectorDefragmentationContext per affected default pool
// (indexed by memory type) and per affected custom pool, and drives them all
// from Defragment().
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    // Registers whole custom pools for defragmentation.
    void AddPools(uint32_t poolCount, VmaPool* pPools);
    // Registers individual allocations; pAllocationsChanged (optional) receives
    // per-allocation "was moved" flags.
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occured and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
6512 
6513 #if VMA_RECORDING_ENABLED
6514 
6515 class VmaRecorder
6516 {
6517 public:
6518  VmaRecorder();
6519  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6520  void WriteConfiguration(
6521  const VkPhysicalDeviceProperties& devProps,
6522  const VkPhysicalDeviceMemoryProperties& memProps,
6523  bool dedicatedAllocationExtensionEnabled);
6524  ~VmaRecorder();
6525 
6526  void RecordCreateAllocator(uint32_t frameIndex);
6527  void RecordDestroyAllocator(uint32_t frameIndex);
6528  void RecordCreatePool(uint32_t frameIndex,
6529  const VmaPoolCreateInfo& createInfo,
6530  VmaPool pool);
6531  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6532  void RecordAllocateMemory(uint32_t frameIndex,
6533  const VkMemoryRequirements& vkMemReq,
6534  const VmaAllocationCreateInfo& createInfo,
6535  VmaAllocation allocation);
6536  void RecordAllocateMemoryPages(uint32_t frameIndex,
6537  const VkMemoryRequirements& vkMemReq,
6538  const VmaAllocationCreateInfo& createInfo,
6539  uint64_t allocationCount,
6540  const VmaAllocation* pAllocations);
6541  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6542  const VkMemoryRequirements& vkMemReq,
6543  bool requiresDedicatedAllocation,
6544  bool prefersDedicatedAllocation,
6545  const VmaAllocationCreateInfo& createInfo,
6546  VmaAllocation allocation);
6547  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6548  const VkMemoryRequirements& vkMemReq,
6549  bool requiresDedicatedAllocation,
6550  bool prefersDedicatedAllocation,
6551  const VmaAllocationCreateInfo& createInfo,
6552  VmaAllocation allocation);
6553  void RecordFreeMemory(uint32_t frameIndex,
6554  VmaAllocation allocation);
6555  void RecordFreeMemoryPages(uint32_t frameIndex,
6556  uint64_t allocationCount,
6557  const VmaAllocation* pAllocations);
6558  void RecordResizeAllocation(
6559  uint32_t frameIndex,
6560  VmaAllocation allocation,
6561  VkDeviceSize newSize);
6562  void RecordSetAllocationUserData(uint32_t frameIndex,
6563  VmaAllocation allocation,
6564  const void* pUserData);
6565  void RecordCreateLostAllocation(uint32_t frameIndex,
6566  VmaAllocation allocation);
6567  void RecordMapMemory(uint32_t frameIndex,
6568  VmaAllocation allocation);
6569  void RecordUnmapMemory(uint32_t frameIndex,
6570  VmaAllocation allocation);
6571  void RecordFlushAllocation(uint32_t frameIndex,
6572  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6573  void RecordInvalidateAllocation(uint32_t frameIndex,
6574  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6575  void RecordCreateBuffer(uint32_t frameIndex,
6576  const VkBufferCreateInfo& bufCreateInfo,
6577  const VmaAllocationCreateInfo& allocCreateInfo,
6578  VmaAllocation allocation);
6579  void RecordCreateImage(uint32_t frameIndex,
6580  const VkImageCreateInfo& imageCreateInfo,
6581  const VmaAllocationCreateInfo& allocCreateInfo,
6582  VmaAllocation allocation);
6583  void RecordDestroyBuffer(uint32_t frameIndex,
6584  VmaAllocation allocation);
6585  void RecordDestroyImage(uint32_t frameIndex,
6586  VmaAllocation allocation);
6587  void RecordTouchAllocation(uint32_t frameIndex,
6588  VmaAllocation allocation);
6589  void RecordGetAllocationInfo(uint32_t frameIndex,
6590  VmaAllocation allocation);
6591  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6592  VmaPool pool);
6593  void RecordDefragmentationBegin(uint32_t frameIndex,
6594  const VmaDefragmentationInfo2& info,
6596  void RecordDefragmentationEnd(uint32_t frameIndex,
6598 
6599 private:
6600  struct CallParams
6601  {
6602  uint32_t threadId;
6603  double time;
6604  };
6605 
6606  class UserDataString
6607  {
6608  public:
6609  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6610  const char* GetString() const { return m_Str; }
6611 
6612  private:
6613  char m_PtrStr[17];
6614  const char* m_Str;
6615  };
6616 
6617  bool m_UseMutex;
6618  VmaRecordFlags m_Flags;
6619  FILE* m_File;
6620  VMA_MUTEX m_FileMutex;
6621  int64_t m_Freq;
6622  int64_t m_StartCounter;
6623 
6624  void GetBasicParams(CallParams& outParams);
6625 
6626  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6627  template<typename T>
6628  void PrintPointerList(uint64_t count, const T* pItems)
6629  {
6630  if(count)
6631  {
6632  fprintf(m_File, "%p", pItems[0]);
6633  for(uint64_t i = 1; i < count; ++i)
6634  {
6635  fprintf(m_File, " %p", pItems[i]);
6636  }
6637  }
6638  }
6639 
6640  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6641  void Flush();
6642 };
6643 
6644 #endif // #if VMA_RECORDING_ENABLED
6645 
6646 /*
6647 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6648 */
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);

    // Returns a fresh VmaAllocation_T from the pool; thread-safe.
    VmaAllocation Allocate();
    // Returns hAlloc to the pool; thread-safe.
    void Free(VmaAllocation hAlloc);

private:
    // Serializes access to m_Allocator, which is not thread-safe by itself.
    VMA_MUTEX m_Mutex;
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
6662 
6663 // Main allocator object.
// Main allocator object.
// Implementation of the public VmaAllocator handle. Owns the default block
// vectors (one per memory type), the lists of dedicated allocations, all
// custom pools, and the table of Vulkan function pointers.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;                      // False when VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED was used.
    bool m_UseKhrDedicatedAllocation;     // VK_KHR_dedicated_allocation extension enabled.
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;  // True if the user supplied CPU allocation callbacks.
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    VmaAllocationObjectAllocator m_AllocationObjectAllocator;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-specified CPU allocation callbacks, or null for defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device limit, clamped up by the debug minimum for testing granularity conflicts.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize for flush/invalidate.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Main deallocation function.
    void FreeMemory(
        size_t allocationCount,
        const VmaAllocation* pAllocations);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult DefragmentationBegin(
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationStats* pStats,
        VmaDefragmentationContext* pContext);
    VkResult DefragmentationEnd(
        VmaDefragmentationContext context);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory that also apply heap
    // size limits and invoke the user's device memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with `pattern` (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

    /*
    Returns bit mask of memory types that can support defragmentation on GPU as
    they support creation of required buffer for copy operations.
    */
    uint32_t GetGpuDefragmentationMemoryTypeBits();

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.

    VMA_RW_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    // Allocates from the block vector or dedicated list of one memory type.
    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Helper function only to be used inside AllocateDedicatedMemory.
    VkResult AllocateDedicatedMemoryPage(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        const VkMemoryAllocateInfo& allocInfo,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void FreeDedicatedMemory(VmaAllocation allocation);

    /*
    Calculates and returns bit mask of memory types that can support defragmentation
    on GPU as they support creation of required buffer for copy operations.
    */
    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
};
6893 
6895 // Memory allocation #2 after VmaAllocator_T definition
6896 
6897 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6898 {
6899  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6900 }
6901 
6902 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6903 {
6904  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6905 }
6906 
6907 template<typename T>
6908 static T* VmaAllocate(VmaAllocator hAllocator)
6909 {
6910  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6911 }
6912 
6913 template<typename T>
6914 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6915 {
6916  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6917 }
6918 
6919 template<typename T>
6920 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6921 {
6922  if(ptr != VMA_NULL)
6923  {
6924  ptr->~T();
6925  VmaFree(hAllocator, ptr);
6926  }
6927 }
6928 
6929 template<typename T>
6930 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6931 {
6932  if(ptr != VMA_NULL)
6933  {
6934  for(size_t i = count; i--; )
6935  ptr[i].~T();
6936  VmaFree(hAllocator, ptr);
6937  }
6938 }
6939 
6941 // VmaStringBuilder
6942 
6943 #if VMA_STATS_STRING_ENABLED
6944 
// Minimal append-only character buffer used to build statistics strings.
// The stored data is NOT NUL-terminated: consumers must pair GetData() with
// GetLength(). Storage grows through the allocator's callbacks.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    // Appends a single character.
    void Add(char ch) { m_Data.push_back(ch); }
    // Appends a NUL-terminated string (the terminator itself is not stored).
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    // Append the decimal text representation of a number.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Append the textual representation of a pointer value.
    void AddPointer(const void* ptr);

private:
    // Raw character storage; no terminator is maintained.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
6962 
6963 void VmaStringBuilder::Add(const char* pStr)
6964 {
6965  const size_t strLen = strlen(pStr);
6966  if(strLen > 0)
6967  {
6968  const size_t oldCount = m_Data.size();
6969  m_Data.resize(oldCount + strLen);
6970  memcpy(m_Data.data() + oldCount, pStr, strLen);
6971  }
6972 }
6973 
6974 void VmaStringBuilder::AddNumber(uint32_t num)
6975 {
6976  char buf[11];
6977  VmaUint32ToStr(buf, sizeof(buf), num);
6978  Add(buf);
6979 }
6980 
6981 void VmaStringBuilder::AddNumber(uint64_t num)
6982 {
6983  char buf[21];
6984  VmaUint64ToStr(buf, sizeof(buf), num);
6985  Add(buf);
6986 }
6987 
6988 void VmaStringBuilder::AddPointer(const void* ptr)
6989 {
6990  char buf[21];
6991  VmaPtrToStr(buf, sizeof(buf), ptr);
6992  Add(buf);
6993 }
6994 
6995 #endif // #if VMA_STATS_STRING_ENABLED
6996 
6998 // VmaJsonWriter
6999 
7000 #if VMA_STATS_STRING_ENABLED
7001 
// Stream-style writer producing indented JSON into a VmaStringBuilder.
// Contract: objects/arrays must be correctly nested and closed; inside an
// object, written values must alternate key (a string) and value. The
// destructor asserts that everything opened has been closed.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Begin/end a JSON object { }. singleLine suppresses newlines/indentation
    // for the members written inside it.
    void BeginObject(bool singleLine = false);
    void EndObject();

    // Begin/end a JSON array [ ].
    void BeginArray(bool singleLine = false);
    void EndArray();

    // WriteString emits a complete string value; the Begin/Continue/End
    // variants let a single string value be composed from multiple parts.
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    // Scalar value writers.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // One indentation unit, emitted once per open nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount; // Values written so far in this scope; keys count too.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Emits separator (": " or ", ") and indentation before a new value;
    // asserts that key positions inside objects receive strings.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
7050 
// Indentation unit used by WriteIndent(), repeated once per open scope.
const char* const VmaJsonWriter::INDENT = " ";
7052 
// Binds the writer to an output string builder; the stack of open scopes uses
// the provided allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
7059 
VmaJsonWriter::~VmaJsonWriter()
{
    // The writer must only be destroyed after every string, object, and array
    // opened on it has been closed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
7065 
7066 void VmaJsonWriter::BeginObject(bool singleLine)
7067 {
7068  VMA_ASSERT(!m_InsideString);
7069 
7070  BeginValue(false);
7071  m_SB.Add('{');
7072 
7073  StackItem item;
7074  item.type = COLLECTION_TYPE_OBJECT;
7075  item.valueCount = 0;
7076  item.singleLineMode = singleLine;
7077  m_Stack.push_back(item);
7078 }
7079 
// Closes the innermost open scope, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing brace aligns with the line that opened the object.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
7090 
7091 void VmaJsonWriter::BeginArray(bool singleLine)
7092 {
7093  VMA_ASSERT(!m_InsideString);
7094 
7095  BeginValue(false);
7096  m_SB.Add('[');
7097 
7098  StackItem item;
7099  item.type = COLLECTION_TYPE_ARRAY;
7100  item.valueCount = 0;
7101  item.singleLineMode = singleLine;
7102  m_Stack.push_back(item);
7103 }
7104 
// Closes the innermost open scope, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing bracket aligns with the line that opened the array.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
7115 
7116 void VmaJsonWriter::WriteString(const char* pStr)
7117 {
7118  BeginString(pStr);
7119  EndString();
7120 }
7121 
7122 void VmaJsonWriter::BeginString(const char* pStr)
7123 {
7124  VMA_ASSERT(!m_InsideString);
7125 
7126  BeginValue(true);
7127  m_SB.Add('"');
7128  m_InsideString = true;
7129  if(pStr != VMA_NULL && pStr[0] != '\0')
7130  {
7131  ContinueString(pStr);
7132  }
7133 }
7134 
7135 void VmaJsonWriter::ContinueString(const char* pStr)
7136 {
7137  VMA_ASSERT(m_InsideString);
7138 
7139  const size_t strLen = strlen(pStr);
7140  for(size_t i = 0; i < strLen; ++i)
7141  {
7142  char ch = pStr[i];
7143  if(ch == '\\')
7144  {
7145  m_SB.Add("\\\\");
7146  }
7147  else if(ch == '"')
7148  {
7149  m_SB.Add("\\\"");
7150  }
7151  else if(ch >= 32)
7152  {
7153  m_SB.Add(ch);
7154  }
7155  else switch(ch)
7156  {
7157  case '\b':
7158  m_SB.Add("\\b");
7159  break;
7160  case '\f':
7161  m_SB.Add("\\f");
7162  break;
7163  case '\n':
7164  m_SB.Add("\\n");
7165  break;
7166  case '\r':
7167  m_SB.Add("\\r");
7168  break;
7169  case '\t':
7170  m_SB.Add("\\t");
7171  break;
7172  default:
7173  VMA_ASSERT(0 && "Character not currently supported.");
7174  break;
7175  }
7176  }
7177 }
7178 
// Appends a 32-bit number, in decimal, to the string currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
7184 
// Appends a 64-bit number, in decimal, to the string currently being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
7190 
// Appends a pointer's textual representation to the string being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
7196 
7197 void VmaJsonWriter::EndString(const char* pStr)
7198 {
7199  VMA_ASSERT(m_InsideString);
7200  if(pStr != VMA_NULL && pStr[0] != '\0')
7201  {
7202  ContinueString(pStr);
7203  }
7204  m_SB.Add('"');
7205  m_InsideString = false;
7206 }
7207 
// Writes a 32-bit number as a JSON value (with separator/indent as needed).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
7214 
// Writes a 64-bit number as a JSON value (with separator/indent as needed).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
7221 
7222 void VmaJsonWriter::WriteBool(bool b)
7223 {
7224  VMA_ASSERT(!m_InsideString);
7225  BeginValue(false);
7226  m_SB.Add(b ? "true" : "false");
7227 }
7228 
// Writes the JSON null literal.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
7235 
7236 void VmaJsonWriter::BeginValue(bool isString)
7237 {
7238  if(!m_Stack.empty())
7239  {
7240  StackItem& currItem = m_Stack.back();
7241  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7242  currItem.valueCount % 2 == 0)
7243  {
7244  VMA_ASSERT(isString);
7245  }
7246 
7247  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7248  currItem.valueCount % 2 != 0)
7249  {
7250  m_SB.Add(": ");
7251  }
7252  else if(currItem.valueCount > 0)
7253  {
7254  m_SB.Add(", ");
7255  WriteIndent();
7256  }
7257  else
7258  {
7259  WriteIndent();
7260  }
7261  ++currItem.valueCount;
7262  }
7263 }
7264 
7265 void VmaJsonWriter::WriteIndent(bool oneLess)
7266 {
7267  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7268  {
7269  m_SB.AddNewLine();
7270 
7271  size_t count = m_Stack.size();
7272  if(count > 0 && oneLess)
7273  {
7274  --count;
7275  }
7276  for(size_t i = 0; i < count; ++i)
7277  {
7278  m_SB.Add(INDENT);
7279  }
7280  }
7281 }
7282 
7283 #endif // #if VMA_STATS_STRING_ENABLED
7284 
7286 
7287 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7288 {
7289  if(IsUserDataString())
7290  {
7291  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7292 
7293  FreeUserDataString(hAllocator);
7294 
7295  if(pUserData != VMA_NULL)
7296  {
7297  const char* const newStrSrc = (char*)pUserData;
7298  const size_t newStrLen = strlen(newStrSrc);
7299  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7300  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7301  m_pUserData = newStrDst;
7302  }
7303  }
7304  else
7305  {
7306  m_pUserData = pUserData;
7307  }
7308 }
7309 
// Re-binds this block allocation to a different memory block/offset,
// transferring its mapping reference count from the old block to the new one.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation contributes one extra reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
7331 
// Updates only this allocation's cached size; the caller is responsible for
// keeping the owning block's metadata consistent.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
7337 
// Updates only this block allocation's offset; the caller is responsible for
// keeping the owning block's metadata consistent.
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
7343 
7344 VkDeviceSize VmaAllocation_T::GetOffset() const
7345 {
7346  switch(m_Type)
7347  {
7348  case ALLOCATION_TYPE_BLOCK:
7349  return m_BlockAllocation.m_Offset;
7350  case ALLOCATION_TYPE_DEDICATED:
7351  return 0;
7352  default:
7353  VMA_ASSERT(0);
7354  return 0;
7355  }
7356 }
7357 
7358 VkDeviceMemory VmaAllocation_T::GetMemory() const
7359 {
7360  switch(m_Type)
7361  {
7362  case ALLOCATION_TYPE_BLOCK:
7363  return m_BlockAllocation.m_Block->GetDeviceMemory();
7364  case ALLOCATION_TYPE_DEDICATED:
7365  return m_DedicatedAllocation.m_hMemory;
7366  default:
7367  VMA_ASSERT(0);
7368  return VK_NULL_HANDLE;
7369  }
7370 }
7371 
7372 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7373 {
7374  switch(m_Type)
7375  {
7376  case ALLOCATION_TYPE_BLOCK:
7377  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7378  case ALLOCATION_TYPE_DEDICATED:
7379  return m_DedicatedAllocation.m_MemoryTypeIndex;
7380  default:
7381  VMA_ASSERT(0);
7382  return UINT32_MAX;
7383  }
7384 }
7385 
7386 void* VmaAllocation_T::GetMappedData() const
7387 {
7388  switch(m_Type)
7389  {
7390  case ALLOCATION_TYPE_BLOCK:
7391  if(m_MapCount != 0)
7392  {
7393  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7394  VMA_ASSERT(pBlockData != VMA_NULL);
7395  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7396  }
7397  else
7398  {
7399  return VMA_NULL;
7400  }
7401  break;
7402  case ALLOCATION_TYPE_DEDICATED:
7403  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7404  return m_DedicatedAllocation.m_pMappedData;
7405  default:
7406  VMA_ASSERT(0);
7407  return VMA_NULL;
7408  }
7409 }
7410 
7411 bool VmaAllocation_T::CanBecomeLost() const
7412 {
7413  switch(m_Type)
7414  {
7415  case ALLOCATION_TYPE_BLOCK:
7416  return m_BlockAllocation.m_CanBecomeLost;
7417  case ALLOCATION_TYPE_DEDICATED:
7418  return false;
7419  default:
7420  VMA_ASSERT(0);
7421  return false;
7422  }
7423 }
7424 
// Atomically marks this allocation as lost if its last use is older than
// frameInUseCount frames before currentFrameIndex. Returns true on success,
// false if the allocation may still be in use (asserts if already lost).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // CAS retry loop; localLastUseFrameIndex is presumably refreshed by
    // CompareExchangeLastUseFrameIndex on failure (compare-exchange
    // convention) — see its definition.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost: callers are expected not to ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently: cannot be reclaimed yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
7456 
7457 #if VMA_STATS_STRING_ENABLED
7458 
7459 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value, so the order here must match the enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
7468 
// Writes this allocation's parameters as key/value pairs into a JSON object
// that the caller has already opened.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque user data: print its address, never dereference it.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Buffer/image usage flags are only recorded for some allocations.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
7504 
7505 #endif
7506 
7507 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7508 {
7509  VMA_ASSERT(IsUserDataString());
7510  if(m_pUserData != VMA_NULL)
7511  {
7512  char* const oldStr = (char*)m_pUserData;
7513  const size_t oldStrLen = strlen(oldStr);
7514  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7515  m_pUserData = VMA_NULL;
7516  }
7517 }
7518 
7519 void VmaAllocation_T::BlockAllocMap()
7520 {
7521  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7522 
7523  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7524  {
7525  ++m_MapCount;
7526  }
7527  else
7528  {
7529  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7530  }
7531 }
7532 
7533 void VmaAllocation_T::BlockAllocUnmap()
7534 {
7535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7536 
7537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7538  {
7539  --m_MapCount;
7540  }
7541  else
7542  {
7543  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7544  }
7545 }
7546 
// Maps this dedicated allocation's memory, returning the pointer via ppData.
// An existing mapping is reused and reference-counted in the low bits of
// m_MapCount; only the first call actually invokes vkMapMemory.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: bump the count, capped at 0x7F (the remaining bit
        // is the persistent-map flag, masked off here).
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the whole VkDeviceMemory.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
7583 
// Decrements this dedicated allocation's mapping reference count and calls
// vkUnmapMemory when the count reaches zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    // Mask off the persistent-map flag bit; only the counter bits matter here.
    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference gone: drop the cached pointer and unmap.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
7604 
7605 #if VMA_STATS_STRING_ENABLED
7606 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max size sub-objects
// are emitted only when there is more than one allocation / unused range,
// since with a single entry they would all equal that entry's size.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
7654 
7655 #endif // #if VMA_STATS_STRING_ENABLED
7656 
// Orders free-suballocation list iterators by the suballocation's size.
// The second overload allows binary search to compare an element directly
// against a requested size (see VmaBinaryFindFirstNotLess usage below).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
7672 
7673 
7675 // class VmaBlockMetadata
7676 
// Base metadata constructor. m_Size stays 0 until Init(size) is called.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
7682 
7683 #if VMA_STATS_STRING_ENABLED
7684 
// Opens the per-block JSON object, writes the summary fields, and opens the
// "Suballocations" array. Must be paired with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    // Left open; entries are appended via PrintDetailedMap_Allocation /
    // PrintDetailedMap_UnusedRange and closed in PrintDetailedMap_End.
    json.WriteString("Suballocations");
    json.BeginArray();
}
7707 
// Writes one used suballocation as a single-line JSON object; the
// allocation-specific fields come from VmaAllocation_T::PrintParameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
7721 
// Writes one free range as a single-line JSON object with type "FREE".
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
7739 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
7745 
7746 #endif // #if VMA_STATS_STRING_ENABLED
7747 
7749 // class VmaBlockMetadata_Generic
7750 
// Constructs empty metadata; the block becomes usable only after Init(size).
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
7759 
// Intentionally empty: containers release their storage via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
7763 
7764 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7765 {
7766  VmaBlockMetadata::Init(size);
7767 
7768  m_FreeCount = 1;
7769  m_SumFreeSize = size;
7770 
7771  VmaSuballocation suballoc = {};
7772  suballoc.offset = 0;
7773  suballoc.size = size;
7774  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7775  suballoc.hAllocation = VK_NULL_HANDLE;
7776 
7777  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7778  m_Suballocations.push_back(suballoc);
7779  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7780  --suballocItem;
7781  m_FreeSuballocationsBySize.push_back(suballocItem);
7782 }
7783 
// Full consistency check of this block's metadata (used via VMA_HEAVY_ASSERT).
// VMA_VALIDATE returns false from this function on the first violated
// condition; returns true when everything is consistent.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Exactly the free suballocations carry a null allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
7865 
7866 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7867 {
7868  if(!m_FreeSuballocationsBySize.empty())
7869  {
7870  return m_FreeSuballocationsBySize.back()->size;
7871  }
7872  else
7873  {
7874  return 0;
7875  }
7876 }
7877 
7878 bool VmaBlockMetadata_Generic::IsEmpty() const
7879 {
7880  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7881 }
7882 
7883 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7884 {
7885  outInfo.blockCount = 1;
7886 
7887  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7888  outInfo.allocationCount = rangeCount - m_FreeCount;
7889  outInfo.unusedRangeCount = m_FreeCount;
7890 
7891  outInfo.unusedBytes = m_SumFreeSize;
7892  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7893 
7894  outInfo.allocationSizeMin = UINT64_MAX;
7895  outInfo.allocationSizeMax = 0;
7896  outInfo.unusedRangeSizeMin = UINT64_MAX;
7897  outInfo.unusedRangeSizeMax = 0;
7898 
7899  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7900  suballocItem != m_Suballocations.cend();
7901  ++suballocItem)
7902  {
7903  const VmaSuballocation& suballoc = *suballocItem;
7904  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7905  {
7906  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7907  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7908  }
7909  else
7910  {
7911  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7912  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7913  }
7914  }
7915 }
7916 
7917 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7918 {
7919  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7920 
7921  inoutStats.size += GetSize();
7922  inoutStats.unusedSize += m_SumFreeSize;
7923  inoutStats.allocationCount += rangeCount - m_FreeCount;
7924  inoutStats.unusedRangeCount += m_FreeCount;
7925  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7926 }
7927 
7928 #if VMA_STATS_STRING_ENABLED
7929 
7930 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7931 {
7932  PrintDetailedMap_Begin(json,
7933  m_SumFreeSize, // unusedBytes
7934  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7935  m_FreeCount); // unusedRangeCount
7936 
7937  size_t i = 0;
7938  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7939  suballocItem != m_Suballocations.cend();
7940  ++suballocItem, ++i)
7941  {
7942  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7943  {
7944  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7945  }
7946  else
7947  {
7948  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7949  }
7950  }
7951 
7952  PrintDetailedMap_End(json);
7953 }
7954 
7955 #endif // #if VMA_STATS_STRING_ENABLED
7956 
7957 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7958  uint32_t currentFrameIndex,
7959  uint32_t frameInUseCount,
7960  VkDeviceSize bufferImageGranularity,
7961  VkDeviceSize allocSize,
7962  VkDeviceSize allocAlignment,
7963  bool upperAddress,
7964  VmaSuballocationType allocType,
7965  bool canMakeOtherLost,
7966  uint32_t strategy,
7967  VmaAllocationRequest* pAllocationRequest)
7968 {
7969  VMA_ASSERT(allocSize > 0);
7970  VMA_ASSERT(!upperAddress);
7971  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7972  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7973  VMA_HEAVY_ASSERT(Validate());
7974 
7975  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7976 
7977  // There is not enough total free space in this block to fullfill the request: Early return.
7978  if(canMakeOtherLost == false &&
7979  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7980  {
7981  return false;
7982  }
7983 
7984  // New algorithm, efficiently searching freeSuballocationsBySize.
7985  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7986  if(freeSuballocCount > 0)
7987  {
7989  {
7990  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7991  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7992  m_FreeSuballocationsBySize.data(),
7993  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7994  allocSize + 2 * VMA_DEBUG_MARGIN,
7995  VmaSuballocationItemSizeLess());
7996  size_t index = it - m_FreeSuballocationsBySize.data();
7997  for(; index < freeSuballocCount; ++index)
7998  {
7999  if(CheckAllocation(
8000  currentFrameIndex,
8001  frameInUseCount,
8002  bufferImageGranularity,
8003  allocSize,
8004  allocAlignment,
8005  allocType,
8006  m_FreeSuballocationsBySize[index],
8007  false, // canMakeOtherLost
8008  &pAllocationRequest->offset,
8009  &pAllocationRequest->itemsToMakeLostCount,
8010  &pAllocationRequest->sumFreeSize,
8011  &pAllocationRequest->sumItemSize))
8012  {
8013  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8014  return true;
8015  }
8016  }
8017  }
8018  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8019  {
8020  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8021  it != m_Suballocations.end();
8022  ++it)
8023  {
8024  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8025  currentFrameIndex,
8026  frameInUseCount,
8027  bufferImageGranularity,
8028  allocSize,
8029  allocAlignment,
8030  allocType,
8031  it,
8032  false, // canMakeOtherLost
8033  &pAllocationRequest->offset,
8034  &pAllocationRequest->itemsToMakeLostCount,
8035  &pAllocationRequest->sumFreeSize,
8036  &pAllocationRequest->sumItemSize))
8037  {
8038  pAllocationRequest->item = it;
8039  return true;
8040  }
8041  }
8042  }
8043  else // WORST_FIT, FIRST_FIT
8044  {
8045  // Search staring from biggest suballocations.
8046  for(size_t index = freeSuballocCount; index--; )
8047  {
8048  if(CheckAllocation(
8049  currentFrameIndex,
8050  frameInUseCount,
8051  bufferImageGranularity,
8052  allocSize,
8053  allocAlignment,
8054  allocType,
8055  m_FreeSuballocationsBySize[index],
8056  false, // canMakeOtherLost
8057  &pAllocationRequest->offset,
8058  &pAllocationRequest->itemsToMakeLostCount,
8059  &pAllocationRequest->sumFreeSize,
8060  &pAllocationRequest->sumItemSize))
8061  {
8062  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8063  return true;
8064  }
8065  }
8066  }
8067  }
8068 
8069  if(canMakeOtherLost)
8070  {
8071  // Brute-force algorithm. TODO: Come up with something better.
8072 
8073  bool found = false;
8074  VmaAllocationRequest tmpAllocRequest = {};
8075  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8076  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8077  suballocIt != m_Suballocations.end();
8078  ++suballocIt)
8079  {
8080  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8081  suballocIt->hAllocation->CanBecomeLost())
8082  {
8083  if(CheckAllocation(
8084  currentFrameIndex,
8085  frameInUseCount,
8086  bufferImageGranularity,
8087  allocSize,
8088  allocAlignment,
8089  allocType,
8090  suballocIt,
8091  canMakeOtherLost,
8092  &tmpAllocRequest.offset,
8093  &tmpAllocRequest.itemsToMakeLostCount,
8094  &tmpAllocRequest.sumFreeSize,
8095  &tmpAllocRequest.sumItemSize))
8096  {
8098  {
8099  *pAllocationRequest = tmpAllocRequest;
8100  pAllocationRequest->item = suballocIt;
8101  break;
8102  }
8103  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8104  {
8105  *pAllocationRequest = tmpAllocRequest;
8106  pAllocationRequest->item = suballocIt;
8107  found = true;
8108  }
8109  }
8110  }
8111  }
8112 
8113  return found;
8114  }
8115 
8116  return false;
8117 }
8118 
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Executes the "make other allocations lost" part of a previously found
    // allocation request: walks forward from pAllocationRequest->item and makes
    // lost exactly itemsToMakeLostCount allocations, freeing each one.
    // Returns false if any of them refuses to become lost (MakeLost fails).
    VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);

    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over an already-free item to reach the next used suballocation.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with free neighbors; continue from
            // the iterator it returns, not from the original item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    // On success the request's item must point at a valid free suballocation.
    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
8152 
8153 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8154 {
8155  uint32_t lostAllocationCount = 0;
8156  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8157  it != m_Suballocations.end();
8158  ++it)
8159  {
8160  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8161  it->hAllocation->CanBecomeLost() &&
8162  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8163  {
8164  it = FreeSuballocation(it);
8165  ++lostAllocationCount;
8166  }
8167  }
8168  return lostAllocationCount;
8169 }
8170 
8171 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8172 {
8173  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8174  it != m_Suballocations.end();
8175  ++it)
8176  {
8177  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8178  {
8179  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8180  {
8181  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8182  return VK_ERROR_VALIDATION_FAILED_EXT;
8183  }
8184  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8185  {
8186  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8187  return VK_ERROR_VALIDATION_FAILED_EXT;
8188  }
8189  }
8190  }
8191 
8192  return VK_SUCCESS;
8193 }
8194 
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    // Commits a previously validated allocation request: converts the chosen
    // free suballocation into a used one of exactly allocSize, and splits off
    // any leftover space before/after it into new free suballocations.
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. (Must happen before mutating size/type below, because
    // unregistration searches the sorted vector by the item's current size.)
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free item was consumed; each nonzero padding adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
8259 
8260 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8261 {
8262  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8263  suballocItem != m_Suballocations.end();
8264  ++suballocItem)
8265  {
8266  VmaSuballocation& suballoc = *suballocItem;
8267  if(suballoc.hAllocation == allocation)
8268  {
8269  FreeSuballocation(suballocItem);
8270  VMA_HEAVY_ASSERT(Validate());
8271  return;
8272  }
8273  }
8274  VMA_ASSERT(0 && "Not found!");
8275 }
8276 
8277 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8278 {
8279  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8280  suballocItem != m_Suballocations.end();
8281  ++suballocItem)
8282  {
8283  VmaSuballocation& suballoc = *suballocItem;
8284  if(suballoc.offset == offset)
8285  {
8286  FreeSuballocation(suballocItem);
8287  return;
8288  }
8289  }
8290  VMA_ASSERT(0 && "Not found!");
8291 }
8292 
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    // Tries to change the size of an existing allocation in place.
    // Shrinking always succeeds: the reclaimed bytes either extend a free
    // neighbor backward or become a new free suballocation. Growing succeeds
    // only if the immediately following suballocation is free and big enough
    // (including VMA_DEBUG_MARGIN). Returns false if growing is impossible
    // or alloc is not found in this block.
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister before mutating its size: the sorted
                        // vector is searched by the item's current size.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
8419 
8420 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8421 {
8422  VkDeviceSize lastSize = 0;
8423  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8424  {
8425  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8426 
8427  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8428  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8429  VMA_VALIDATE(it->size >= lastSize);
8430  lastSize = it->size;
8431  }
8432  return true;
8433 }
8434 
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    // Tests whether an allocation of allocSize/allocAlignment/allocType can be
    // placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
    // bufferImageGranularity. Two modes:
    // - canMakeOtherLost == false: suballocItem must be a single free range
    //   that fits the request entirely.
    // - canMakeOtherLost == true:  the request may span multiple consecutive
    //   suballocations, counting used ones that can become lost into
    //   *itemsToMakeLostCount / *pSumItemSize.
    // On success returns true with *pOffset set to the final aligned offset.
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // A used starting item must itself be losable and old enough.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bump the offset to the next granularity page to avoid sharing
                // a page with a conflicting resource type.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // A used item inside the span must be losable and old enough.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Single free-range path: the request must fit entirely inside
        // suballocItem, which must already be free.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
8708 
8709 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8710 {
8711  VMA_ASSERT(item != m_Suballocations.end());
8712  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8713 
8714  VmaSuballocationList::iterator nextItem = item;
8715  ++nextItem;
8716  VMA_ASSERT(nextItem != m_Suballocations.end());
8717  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8718 
8719  item->size += nextItem->size;
8720  --m_FreeCount;
8721  m_Suballocations.erase(nextItem);
8722 }
8723 
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Marks the given suballocation as free, merges it with adjacent free
    // suballocations, updates m_FreeCount/m_SumFreeSize, and keeps
    // m_FreeSuballocationsBySize consistent. Returns an iterator to the
    // resulting (possibly merged) free suballocation.

    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // Neighbor must be unregistered before MergeFreeWithNext erases it.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem changes size during the merge, so it must be unregistered
        // first and re-registered afterwards to keep the sorted vector valid.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
8775 
8776 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8777 {
8778  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8779  VMA_ASSERT(item->size > 0);
8780 
8781  // You may want to enable this validation at the beginning or at the end of
8782  // this function, depending on what do you want to check.
8783  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8784 
8785  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8786  {
8787  if(m_FreeSuballocationsBySize.empty())
8788  {
8789  m_FreeSuballocationsBySize.push_back(item);
8790  }
8791  else
8792  {
8793  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8794  }
8795  }
8796 
8797  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8798 }
8799 
8800 
8801 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8802 {
8803  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8804  VMA_ASSERT(item->size > 0);
8805 
8806  // You may want to enable this validation at the beginning or at the end of
8807  // this function, depending on what do you want to check.
8808  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8809 
8810  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8811  {
8812  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8813  m_FreeSuballocationsBySize.data(),
8814  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8815  item,
8816  VmaSuballocationItemSizeLess());
8817  for(size_t index = it - m_FreeSuballocationsBySize.data();
8818  index < m_FreeSuballocationsBySize.size();
8819  ++index)
8820  {
8821  if(m_FreeSuballocationsBySize[index] == item)
8822  {
8823  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8824  return;
8825  }
8826  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8827  }
8828  VMA_ASSERT(0 && "Not found.");
8829  }
8830 
8831  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8832 }
8833 
8834 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8835  VkDeviceSize bufferImageGranularity,
8836  VmaSuballocationType& inOutPrevSuballocType) const
8837 {
8838  if(bufferImageGranularity == 1 || IsEmpty())
8839  {
8840  return false;
8841  }
8842 
8843  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8844  bool typeConflictFound = false;
8845  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8846  it != m_Suballocations.cend();
8847  ++it)
8848  {
8849  const VmaSuballocationType suballocType = it->type;
8850  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8851  {
8852  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8853  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8854  {
8855  typeConflictFound = true;
8856  }
8857  inOutPrevSuballocType = suballocType;
8858  }
8859  }
8860 
8861  return typeConflictFound || minAlignment >= bufferImageGranularity;
8862 }
8863 
8865 // class VmaBlockMetadata_Linear
8866 
// Constructs empty linear-algorithm metadata: both suballocation vectors empty,
// 2nd vector inactive, no null (freed) items accounted yet.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0), // m_Suballocations0 starts as the "1st" vector.
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
8879 
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
    // Nothing to release explicitly; member vectors clean up via their allocator.
}
8883 
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    // Initializes metadata for an empty block: the entire size is free.
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
8889 
bool VmaBlockMetadata_Linear::Validate() const
{
    // Checks all internal invariants of the linear allocator's two suballocation
    // vectors: null-item bookkeeping, ascending non-overlapping offsets (with
    // VMA_DEBUG_MARGIN between items), allocation<->suballocation agreement,
    // and that m_SumFreeSize matches the total minus the used bytes.
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Running minimum offset for the next item; starts past the leading margin.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the low end of the block,
    // before the 1st vector's items, so walk it first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The first m_1stNullItemsBeginCount items of the 1st vector must all be null.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the high end,
    // so iterate it in reverse to keep offsets ascending.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
9016 
9017 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9018 {
9019  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9020  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9021 }
9022 
9023 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9024 {
9025  const VkDeviceSize size = GetSize();
9026 
9027  /*
9028  We don't consider gaps inside allocation vectors with freed allocations because
9029  they are not suitable for reuse in linear allocator. We consider only space that
9030  is available for new allocations.
9031  */
9032  if(IsEmpty())
9033  {
9034  return size;
9035  }
9036 
9037  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9038 
9039  switch(m_2ndVectorMode)
9040  {
9041  case SECOND_VECTOR_EMPTY:
9042  /*
9043  Available space is after end of 1st, as well as before beginning of 1st (which
9044  whould make it a ring buffer).
9045  */
9046  {
9047  const size_t suballocations1stCount = suballocations1st.size();
9048  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9049  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9050  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9051  return VMA_MAX(
9052  firstSuballoc.offset,
9053  size - (lastSuballoc.offset + lastSuballoc.size));
9054  }
9055  break;
9056 
9057  case SECOND_VECTOR_RING_BUFFER:
9058  /*
9059  Available space is only between end of 2nd and beginning of 1st.
9060  */
9061  {
9062  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9063  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9064  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9065  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9066  }
9067  break;
9068 
9069  case SECOND_VECTOR_DOUBLE_STACK:
9070  /*
9071  Available space is only between end of 1st and top of 2nd.
9072  */
9073  {
9074  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9075  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9076  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9077  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9078  }
9079  break;
9080 
9081  default:
9082  VMA_ASSERT(0);
9083  return 0;
9084  }
9085 }
9086 
9087 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9088 {
9089  const VkDeviceSize size = GetSize();
9090  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9091  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9092  const size_t suballoc1stCount = suballocations1st.size();
9093  const size_t suballoc2ndCount = suballocations2nd.size();
9094 
9095  outInfo.blockCount = 1;
9096  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9097  outInfo.unusedRangeCount = 0;
9098  outInfo.usedBytes = 0;
9099  outInfo.allocationSizeMin = UINT64_MAX;
9100  outInfo.allocationSizeMax = 0;
9101  outInfo.unusedRangeSizeMin = UINT64_MAX;
9102  outInfo.unusedRangeSizeMax = 0;
9103 
9104  VkDeviceSize lastOffset = 0;
9105 
9106  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9107  {
9108  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9109  size_t nextAlloc2ndIndex = 0;
9110  while(lastOffset < freeSpace2ndTo1stEnd)
9111  {
9112  // Find next non-null allocation or move nextAllocIndex to the end.
9113  while(nextAlloc2ndIndex < suballoc2ndCount &&
9114  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9115  {
9116  ++nextAlloc2ndIndex;
9117  }
9118 
9119  // Found non-null allocation.
9120  if(nextAlloc2ndIndex < suballoc2ndCount)
9121  {
9122  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9123 
9124  // 1. Process free space before this allocation.
9125  if(lastOffset < suballoc.offset)
9126  {
9127  // There is free space from lastOffset to suballoc.offset.
9128  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9129  ++outInfo.unusedRangeCount;
9130  outInfo.unusedBytes += unusedRangeSize;
9131  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9132  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9133  }
9134 
9135  // 2. Process this allocation.
9136  // There is allocation with suballoc.offset, suballoc.size.
9137  outInfo.usedBytes += suballoc.size;
9138  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9139  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9140 
9141  // 3. Prepare for next iteration.
9142  lastOffset = suballoc.offset + suballoc.size;
9143  ++nextAlloc2ndIndex;
9144  }
9145  // We are at the end.
9146  else
9147  {
9148  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9149  if(lastOffset < freeSpace2ndTo1stEnd)
9150  {
9151  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9152  ++outInfo.unusedRangeCount;
9153  outInfo.unusedBytes += unusedRangeSize;
9154  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9155  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9156  }
9157 
9158  // End of loop.
9159  lastOffset = freeSpace2ndTo1stEnd;
9160  }
9161  }
9162  }
9163 
9164  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9165  const VkDeviceSize freeSpace1stTo2ndEnd =
9166  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9167  while(lastOffset < freeSpace1stTo2ndEnd)
9168  {
9169  // Find next non-null allocation or move nextAllocIndex to the end.
9170  while(nextAlloc1stIndex < suballoc1stCount &&
9171  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9172  {
9173  ++nextAlloc1stIndex;
9174  }
9175 
9176  // Found non-null allocation.
9177  if(nextAlloc1stIndex < suballoc1stCount)
9178  {
9179  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9180 
9181  // 1. Process free space before this allocation.
9182  if(lastOffset < suballoc.offset)
9183  {
9184  // There is free space from lastOffset to suballoc.offset.
9185  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9186  ++outInfo.unusedRangeCount;
9187  outInfo.unusedBytes += unusedRangeSize;
9188  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9189  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9190  }
9191 
9192  // 2. Process this allocation.
9193  // There is allocation with suballoc.offset, suballoc.size.
9194  outInfo.usedBytes += suballoc.size;
9195  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9196  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9197 
9198  // 3. Prepare for next iteration.
9199  lastOffset = suballoc.offset + suballoc.size;
9200  ++nextAlloc1stIndex;
9201  }
9202  // We are at the end.
9203  else
9204  {
9205  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9206  if(lastOffset < freeSpace1stTo2ndEnd)
9207  {
9208  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9209  ++outInfo.unusedRangeCount;
9210  outInfo.unusedBytes += unusedRangeSize;
9211  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9212  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9213  }
9214 
9215  // End of loop.
9216  lastOffset = freeSpace1stTo2ndEnd;
9217  }
9218  }
9219 
9220  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9221  {
9222  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9223  while(lastOffset < size)
9224  {
9225  // Find next non-null allocation or move nextAllocIndex to the end.
9226  while(nextAlloc2ndIndex != SIZE_MAX &&
9227  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9228  {
9229  --nextAlloc2ndIndex;
9230  }
9231 
9232  // Found non-null allocation.
9233  if(nextAlloc2ndIndex != SIZE_MAX)
9234  {
9235  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9236 
9237  // 1. Process free space before this allocation.
9238  if(lastOffset < suballoc.offset)
9239  {
9240  // There is free space from lastOffset to suballoc.offset.
9241  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9242  ++outInfo.unusedRangeCount;
9243  outInfo.unusedBytes += unusedRangeSize;
9244  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9245  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9246  }
9247 
9248  // 2. Process this allocation.
9249  // There is allocation with suballoc.offset, suballoc.size.
9250  outInfo.usedBytes += suballoc.size;
9251  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9252  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9253 
9254  // 3. Prepare for next iteration.
9255  lastOffset = suballoc.offset + suballoc.size;
9256  --nextAlloc2ndIndex;
9257  }
9258  // We are at the end.
9259  else
9260  {
9261  // There is free space from lastOffset to size.
9262  if(lastOffset < size)
9263  {
9264  const VkDeviceSize unusedRangeSize = size - lastOffset;
9265  ++outInfo.unusedRangeCount;
9266  outInfo.unusedBytes += unusedRangeSize;
9267  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9268  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9269  }
9270 
9271  // End of loop.
9272  lastOffset = size;
9273  }
9274  }
9275  }
9276 
9277  outInfo.unusedBytes = size - outInfo.usedBytes;
9278 }
9279 
9280 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9281 {
9282  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9283  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9284  const VkDeviceSize size = GetSize();
9285  const size_t suballoc1stCount = suballocations1st.size();
9286  const size_t suballoc2ndCount = suballocations2nd.size();
9287 
9288  inoutStats.size += size;
9289 
9290  VkDeviceSize lastOffset = 0;
9291 
9292  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9293  {
9294  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9295  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9296  while(lastOffset < freeSpace2ndTo1stEnd)
9297  {
9298  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9299  while(nextAlloc2ndIndex < suballoc2ndCount &&
9300  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9301  {
9302  ++nextAlloc2ndIndex;
9303  }
9304 
9305  // Found non-null allocation.
9306  if(nextAlloc2ndIndex < suballoc2ndCount)
9307  {
9308  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9309 
9310  // 1. Process free space before this allocation.
9311  if(lastOffset < suballoc.offset)
9312  {
9313  // There is free space from lastOffset to suballoc.offset.
9314  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9315  inoutStats.unusedSize += unusedRangeSize;
9316  ++inoutStats.unusedRangeCount;
9317  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9318  }
9319 
9320  // 2. Process this allocation.
9321  // There is allocation with suballoc.offset, suballoc.size.
9322  ++inoutStats.allocationCount;
9323 
9324  // 3. Prepare for next iteration.
9325  lastOffset = suballoc.offset + suballoc.size;
9326  ++nextAlloc2ndIndex;
9327  }
9328  // We are at the end.
9329  else
9330  {
9331  if(lastOffset < freeSpace2ndTo1stEnd)
9332  {
9333  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9334  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9335  inoutStats.unusedSize += unusedRangeSize;
9336  ++inoutStats.unusedRangeCount;
9337  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9338  }
9339 
9340  // End of loop.
9341  lastOffset = freeSpace2ndTo1stEnd;
9342  }
9343  }
9344  }
9345 
9346  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9347  const VkDeviceSize freeSpace1stTo2ndEnd =
9348  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9349  while(lastOffset < freeSpace1stTo2ndEnd)
9350  {
9351  // Find next non-null allocation or move nextAllocIndex to the end.
9352  while(nextAlloc1stIndex < suballoc1stCount &&
9353  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9354  {
9355  ++nextAlloc1stIndex;
9356  }
9357 
9358  // Found non-null allocation.
9359  if(nextAlloc1stIndex < suballoc1stCount)
9360  {
9361  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9362 
9363  // 1. Process free space before this allocation.
9364  if(lastOffset < suballoc.offset)
9365  {
9366  // There is free space from lastOffset to suballoc.offset.
9367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9368  inoutStats.unusedSize += unusedRangeSize;
9369  ++inoutStats.unusedRangeCount;
9370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9371  }
9372 
9373  // 2. Process this allocation.
9374  // There is allocation with suballoc.offset, suballoc.size.
9375  ++inoutStats.allocationCount;
9376 
9377  // 3. Prepare for next iteration.
9378  lastOffset = suballoc.offset + suballoc.size;
9379  ++nextAlloc1stIndex;
9380  }
9381  // We are at the end.
9382  else
9383  {
9384  if(lastOffset < freeSpace1stTo2ndEnd)
9385  {
9386  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9387  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9388  inoutStats.unusedSize += unusedRangeSize;
9389  ++inoutStats.unusedRangeCount;
9390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9391  }
9392 
9393  // End of loop.
9394  lastOffset = freeSpace1stTo2ndEnd;
9395  }
9396  }
9397 
9398  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9399  {
9400  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9401  while(lastOffset < size)
9402  {
9403  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9404  while(nextAlloc2ndIndex != SIZE_MAX &&
9405  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9406  {
9407  --nextAlloc2ndIndex;
9408  }
9409 
9410  // Found non-null allocation.
9411  if(nextAlloc2ndIndex != SIZE_MAX)
9412  {
9413  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9414 
9415  // 1. Process free space before this allocation.
9416  if(lastOffset < suballoc.offset)
9417  {
9418  // There is free space from lastOffset to suballoc.offset.
9419  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9420  inoutStats.unusedSize += unusedRangeSize;
9421  ++inoutStats.unusedRangeCount;
9422  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9423  }
9424 
9425  // 2. Process this allocation.
9426  // There is allocation with suballoc.offset, suballoc.size.
9427  ++inoutStats.allocationCount;
9428 
9429  // 3. Prepare for next iteration.
9430  lastOffset = suballoc.offset + suballoc.size;
9431  --nextAlloc2ndIndex;
9432  }
9433  // We are at the end.
9434  else
9435  {
9436  if(lastOffset < size)
9437  {
9438  // There is free space from lastOffset to size.
9439  const VkDeviceSize unusedRangeSize = size - lastOffset;
9440  inoutStats.unusedSize += unusedRangeSize;
9441  ++inoutStats.unusedRangeCount;
9442  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9443  }
9444 
9445  // End of loop.
9446  lastOffset = size;
9447  }
9448  }
9449  }
9450 }
9451 
9452 #if VMA_STATS_STRING_ENABLED
9453 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9454 {
9455  const VkDeviceSize size = GetSize();
9456  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9457  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9458  const size_t suballoc1stCount = suballocations1st.size();
9459  const size_t suballoc2ndCount = suballocations2nd.size();
9460 
9461  // FIRST PASS
9462 
9463  size_t unusedRangeCount = 0;
9464  VkDeviceSize usedBytes = 0;
9465 
9466  VkDeviceSize lastOffset = 0;
9467 
9468  size_t alloc2ndCount = 0;
9469  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9470  {
9471  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9472  size_t nextAlloc2ndIndex = 0;
9473  while(lastOffset < freeSpace2ndTo1stEnd)
9474  {
9475  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9476  while(nextAlloc2ndIndex < suballoc2ndCount &&
9477  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9478  {
9479  ++nextAlloc2ndIndex;
9480  }
9481 
9482  // Found non-null allocation.
9483  if(nextAlloc2ndIndex < suballoc2ndCount)
9484  {
9485  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9486 
9487  // 1. Process free space before this allocation.
9488  if(lastOffset < suballoc.offset)
9489  {
9490  // There is free space from lastOffset to suballoc.offset.
9491  ++unusedRangeCount;
9492  }
9493 
9494  // 2. Process this allocation.
9495  // There is allocation with suballoc.offset, suballoc.size.
9496  ++alloc2ndCount;
9497  usedBytes += suballoc.size;
9498 
9499  // 3. Prepare for next iteration.
9500  lastOffset = suballoc.offset + suballoc.size;
9501  ++nextAlloc2ndIndex;
9502  }
9503  // We are at the end.
9504  else
9505  {
9506  if(lastOffset < freeSpace2ndTo1stEnd)
9507  {
9508  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9509  ++unusedRangeCount;
9510  }
9511 
9512  // End of loop.
9513  lastOffset = freeSpace2ndTo1stEnd;
9514  }
9515  }
9516  }
9517 
9518  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9519  size_t alloc1stCount = 0;
9520  const VkDeviceSize freeSpace1stTo2ndEnd =
9521  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9522  while(lastOffset < freeSpace1stTo2ndEnd)
9523  {
9524  // Find next non-null allocation or move nextAllocIndex to the end.
9525  while(nextAlloc1stIndex < suballoc1stCount &&
9526  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9527  {
9528  ++nextAlloc1stIndex;
9529  }
9530 
9531  // Found non-null allocation.
9532  if(nextAlloc1stIndex < suballoc1stCount)
9533  {
9534  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9535 
9536  // 1. Process free space before this allocation.
9537  if(lastOffset < suballoc.offset)
9538  {
9539  // There is free space from lastOffset to suballoc.offset.
9540  ++unusedRangeCount;
9541  }
9542 
9543  // 2. Process this allocation.
9544  // There is allocation with suballoc.offset, suballoc.size.
9545  ++alloc1stCount;
9546  usedBytes += suballoc.size;
9547 
9548  // 3. Prepare for next iteration.
9549  lastOffset = suballoc.offset + suballoc.size;
9550  ++nextAlloc1stIndex;
9551  }
9552  // We are at the end.
9553  else
9554  {
9555  if(lastOffset < size)
9556  {
9557  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9558  ++unusedRangeCount;
9559  }
9560 
9561  // End of loop.
9562  lastOffset = freeSpace1stTo2ndEnd;
9563  }
9564  }
9565 
9566  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9567  {
9568  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9569  while(lastOffset < size)
9570  {
9571  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9572  while(nextAlloc2ndIndex != SIZE_MAX &&
9573  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9574  {
9575  --nextAlloc2ndIndex;
9576  }
9577 
9578  // Found non-null allocation.
9579  if(nextAlloc2ndIndex != SIZE_MAX)
9580  {
9581  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9582 
9583  // 1. Process free space before this allocation.
9584  if(lastOffset < suballoc.offset)
9585  {
9586  // There is free space from lastOffset to suballoc.offset.
9587  ++unusedRangeCount;
9588  }
9589 
9590  // 2. Process this allocation.
9591  // There is allocation with suballoc.offset, suballoc.size.
9592  ++alloc2ndCount;
9593  usedBytes += suballoc.size;
9594 
9595  // 3. Prepare for next iteration.
9596  lastOffset = suballoc.offset + suballoc.size;
9597  --nextAlloc2ndIndex;
9598  }
9599  // We are at the end.
9600  else
9601  {
9602  if(lastOffset < size)
9603  {
9604  // There is free space from lastOffset to size.
9605  ++unusedRangeCount;
9606  }
9607 
9608  // End of loop.
9609  lastOffset = size;
9610  }
9611  }
9612  }
9613 
9614  const VkDeviceSize unusedBytes = size - usedBytes;
9615  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9616 
9617  // SECOND PASS
9618  lastOffset = 0;
9619 
9620  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9621  {
9622  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9623  size_t nextAlloc2ndIndex = 0;
9624  while(lastOffset < freeSpace2ndTo1stEnd)
9625  {
9626  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9627  while(nextAlloc2ndIndex < suballoc2ndCount &&
9628  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9629  {
9630  ++nextAlloc2ndIndex;
9631  }
9632 
9633  // Found non-null allocation.
9634  if(nextAlloc2ndIndex < suballoc2ndCount)
9635  {
9636  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9637 
9638  // 1. Process free space before this allocation.
9639  if(lastOffset < suballoc.offset)
9640  {
9641  // There is free space from lastOffset to suballoc.offset.
9642  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9643  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9644  }
9645 
9646  // 2. Process this allocation.
9647  // There is allocation with suballoc.offset, suballoc.size.
9648  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9649 
9650  // 3. Prepare for next iteration.
9651  lastOffset = suballoc.offset + suballoc.size;
9652  ++nextAlloc2ndIndex;
9653  }
9654  // We are at the end.
9655  else
9656  {
9657  if(lastOffset < freeSpace2ndTo1stEnd)
9658  {
9659  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9660  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9661  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9662  }
9663 
9664  // End of loop.
9665  lastOffset = freeSpace2ndTo1stEnd;
9666  }
9667  }
9668  }
9669 
9670  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9671  while(lastOffset < freeSpace1stTo2ndEnd)
9672  {
9673  // Find next non-null allocation or move nextAllocIndex to the end.
9674  while(nextAlloc1stIndex < suballoc1stCount &&
9675  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9676  {
9677  ++nextAlloc1stIndex;
9678  }
9679 
9680  // Found non-null allocation.
9681  if(nextAlloc1stIndex < suballoc1stCount)
9682  {
9683  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9684 
9685  // 1. Process free space before this allocation.
9686  if(lastOffset < suballoc.offset)
9687  {
9688  // There is free space from lastOffset to suballoc.offset.
9689  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9690  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9691  }
9692 
9693  // 2. Process this allocation.
9694  // There is allocation with suballoc.offset, suballoc.size.
9695  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9696 
9697  // 3. Prepare for next iteration.
9698  lastOffset = suballoc.offset + suballoc.size;
9699  ++nextAlloc1stIndex;
9700  }
9701  // We are at the end.
9702  else
9703  {
9704  if(lastOffset < freeSpace1stTo2ndEnd)
9705  {
9706  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9707  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9708  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9709  }
9710 
9711  // End of loop.
9712  lastOffset = freeSpace1stTo2ndEnd;
9713  }
9714  }
9715 
9716  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9717  {
9718  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9719  while(lastOffset < size)
9720  {
9721  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9722  while(nextAlloc2ndIndex != SIZE_MAX &&
9723  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9724  {
9725  --nextAlloc2ndIndex;
9726  }
9727 
9728  // Found non-null allocation.
9729  if(nextAlloc2ndIndex != SIZE_MAX)
9730  {
9731  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9732 
9733  // 1. Process free space before this allocation.
9734  if(lastOffset < suballoc.offset)
9735  {
9736  // There is free space from lastOffset to suballoc.offset.
9737  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9738  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9739  }
9740 
9741  // 2. Process this allocation.
9742  // There is allocation with suballoc.offset, suballoc.size.
9743  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9744 
9745  // 3. Prepare for next iteration.
9746  lastOffset = suballoc.offset + suballoc.size;
9747  --nextAlloc2ndIndex;
9748  }
9749  // We are at the end.
9750  else
9751  {
9752  if(lastOffset < size)
9753  {
9754  // There is free space from lastOffset to size.
9755  const VkDeviceSize unusedRangeSize = size - lastOffset;
9756  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9757  }
9758 
9759  // End of loop.
9760  lastOffset = size;
9761  }
9762  }
9763  }
9764 
9765  PrintDetailedMap_End(json);
9766 }
9767 #endif // #if VMA_STATS_STRING_ENABLED
9768 
9769 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9770  uint32_t currentFrameIndex,
9771  uint32_t frameInUseCount,
9772  VkDeviceSize bufferImageGranularity,
9773  VkDeviceSize allocSize,
9774  VkDeviceSize allocAlignment,
9775  bool upperAddress,
9776  VmaSuballocationType allocType,
9777  bool canMakeOtherLost,
9778  uint32_t strategy,
9779  VmaAllocationRequest* pAllocationRequest)
9780 {
9781  VMA_ASSERT(allocSize > 0);
9782  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9783  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9784  VMA_HEAVY_ASSERT(Validate());
9785  return upperAddress ?
9786  CreateAllocationRequest_UpperAddress(
9787  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9788  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9789  CreateAllocationRequest_LowerAddress(
9790  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9791  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9792 }
9793 
// Tries to place an allocation at the upper (high-address) end of the block,
// i.e. below 2nd.back() when the 2nd vector is used as a stack, or below the
// end of the block when 2nd is empty. On success fills *pAllocationRequest
// and returns true; returns false if the space, margin, alignment, or
// bufferImageGranularity constraints cannot be satisfied.
// currentFrameIndex / frameInUseCount / canMakeOtherLost / strategy are part
// of the common CreateAllocationRequest_* signature; this upper-address path
// does not use them (it never makes other allocations lost).
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // Upper-address (double-stack) use is incompatible with ring-buffer mode.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        // NOTE: computed before the range check below; the underflowed value is
        // never used because the function returns false in that case.
        resultBaseOffset = lastSuballoc.offset - allocSize;
        if(allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment. Allocating top-down, so align DOWN toward lower addresses.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        // 2nd is stored top-down; iterate backwards = increasing address.
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if(bufferImageGranularityConflict)
        {
            // Move down to a granularity boundary to separate conflicting resources.
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        // Free space between the end of 1st and the top of the would-be allocation.
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        // pAllocationRequest->item unused.
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
9913 
/*
Tries to find a place for a new allocation growing from the lower address
(the default direction of the linear algorithm).

Two regions are tried in order:
1. The free space after the end of the 1st vector (when the 2nd vector is
   empty or acts as the upper part of a double stack).
2. Wrap-around: the free space after the end of the 2nd vector, bounded by
   the first live allocation of the 1st vector (ring-buffer mode). In this
   mode, if canMakeOtherLost is true, colliding allocations from the
   beginning of the 1st vector may be counted as "to make lost" to create
   room.

The `strategy` parameter is not used by this implementation.
Returns true and fills *pAllocationRequest on success, false otherwise.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Walk backwards: only suballocations sharing the last granularity
            // page with resultOffset can conflict.
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // In double-stack mode the upper stack (2nd vector) limits the free
        // space; its back() holds the lowest-offset upper allocation.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : size;

        // There is enough free space at the end after alignment.
        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->sumItemSize = 0;
        size_t index1st = m_1stNullItemsBeginCount;

        if(canMakeOtherLost)
        {
            // Count how many allocations at the beginning of the 1st vector
            // collide with [resultOffset, resultOffset+allocSize+margin) and
            // would have to be made lost.
            while(index1st < suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
            {
                // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                const VmaSuballocation& suballoc = suballocations1st[index1st];
                if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // No problem.
                }
                else
                {
                    VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                    if(suballoc.hAllocation->CanBecomeLost() &&
                        suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++pAllocationRequest->itemsToMakeLostCount;
                        pAllocationRequest->sumItemSize += suballoc.size;
                    }
                    else
                    {
                        return false;
                    }
                }
                ++index1st;
            }

            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, we must mark more allocations lost or fail.
            if(bufferImageGranularity > 1)
            {
                while(index1st < suballocations1st.size())
                {
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                    {
                        if(suballoc.hAllocation != VK_NULL_HANDLE)
                        {
                            // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                            if(suballoc.hAllocation->CanBecomeLost() &&
                                suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                            {
                                ++pAllocationRequest->itemsToMakeLostCount;
                                pAllocationRequest->sumItemSize += suballoc.size;
                            }
                            else
                            {
                                return false;
                            }
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                    ++index1st;
                }
            }

            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
            if(index1st == suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
            {
                // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
            }
        }

        // There is enough free space at the end after alignment.
        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free size is everything between the end of the 2nd vector and
            // the end of the collision region, minus the items to be lost.
            pAllocationRequest->sumFreeSize =
                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                - resultBaseOffset
                - pAllocationRequest->sumItemSize;
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}
10184 
/*
Makes lost the number of allocations recorded in
pAllocationRequest->itemsToMakeLostCount, scanning from the first live item
of the 1st vector and wrapping into the 2nd vector in ring-buffer mode
(or back to the start of the 1st vector when the 2nd is empty).
Returns false as soon as any MakeLost() call fails; returns true when all
requested items were freed. Calls CleanupAfterFree() on success, which also
re-validates the metadata.
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost only applies to ring-buffer usage (or an
    // as-yet-empty 2nd vector) - never to double-stack mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // If we get to the end of 1st, we wrap around to the beginning of 2nd (or of 1st).
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // The slot stays in place as a null item; account for it in
                // the null-item counter of whichever vector it belongs to.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
10248 
10249 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10250 {
10251  uint32_t lostAllocationCount = 0;
10252 
10253  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10254  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10255  {
10256  VmaSuballocation& suballoc = suballocations1st[i];
10257  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10258  suballoc.hAllocation->CanBecomeLost() &&
10259  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10260  {
10261  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10262  suballoc.hAllocation = VK_NULL_HANDLE;
10263  ++m_1stNullItemsMiddleCount;
10264  m_SumFreeSize += suballoc.size;
10265  ++lostAllocationCount;
10266  }
10267  }
10268 
10269  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10270  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10271  {
10272  VmaSuballocation& suballoc = suballocations2nd[i];
10273  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10274  suballoc.hAllocation->CanBecomeLost() &&
10275  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10276  {
10277  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10278  suballoc.hAllocation = VK_NULL_HANDLE;
10279  ++m_2ndNullItemsCount;
10280  m_SumFreeSize += suballoc.size;
10281  ++lostAllocationCount;
10282  }
10283  }
10284 
10285  if(lostAllocationCount)
10286  {
10287  CleanupAfterFree();
10288  }
10289 
10290  return lostAllocationCount;
10291 }
10292 
10293 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10294 {
10295  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10296  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10297  {
10298  const VmaSuballocation& suballoc = suballocations1st[i];
10299  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10300  {
10301  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10302  {
10303  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10304  return VK_ERROR_VALIDATION_FAILED_EXT;
10305  }
10306  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10307  {
10308  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10309  return VK_ERROR_VALIDATION_FAILED_EXT;
10310  }
10311  }
10312  }
10313 
10314  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10315  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10316  {
10317  const VmaSuballocation& suballoc = suballocations2nd[i];
10318  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10319  {
10320  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10321  {
10322  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10323  return VK_ERROR_VALIDATION_FAILED_EXT;
10324  }
10325  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10326  {
10327  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10328  return VK_ERROR_VALIDATION_FAILED_EXT;
10329  }
10330  }
10331  }
10332 
10333  return VK_SUCCESS;
10334 }
10335 
10336 void VmaBlockMetadata_Linear::Alloc(
10337  const VmaAllocationRequest& request,
10338  VmaSuballocationType type,
10339  VkDeviceSize allocSize,
10340  VmaAllocation hAllocation)
10341 {
10342  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10343 
10344  switch(request.type)
10345  {
10346  case VmaAllocationRequestType::UpperAddress:
10347  {
10348  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10349  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10350  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10351  suballocations2nd.push_back(newSuballoc);
10352  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10353  }
10354  break;
10355  case VmaAllocationRequestType::EndOf1st:
10356  {
10357  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10358 
10359  VMA_ASSERT(suballocations1st.empty() ||
10360  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10361  // Check if it fits before the end of the block.
10362  VMA_ASSERT(request.offset + allocSize <= GetSize());
10363 
10364  suballocations1st.push_back(newSuballoc);
10365  }
10366  break;
10367  case VmaAllocationRequestType::EndOf2nd:
10368  {
10369  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10370  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10371  VMA_ASSERT(!suballocations1st.empty() &&
10372  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10373  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10374 
10375  switch(m_2ndVectorMode)
10376  {
10377  case SECOND_VECTOR_EMPTY:
10378  // First allocation from second part ring buffer.
10379  VMA_ASSERT(suballocations2nd.empty());
10380  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10381  break;
10382  case SECOND_VECTOR_RING_BUFFER:
10383  // 2-part ring buffer is already started.
10384  VMA_ASSERT(!suballocations2nd.empty());
10385  break;
10386  case SECOND_VECTOR_DOUBLE_STACK:
10387  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10388  break;
10389  default:
10390  VMA_ASSERT(0);
10391  }
10392 
10393  suballocations2nd.push_back(newSuballoc);
10394  }
10395  break;
10396  default:
10397  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10398  }
10399 
10400  m_SumFreeSize -= newSuballoc.size;
10401 }
10402 
// Frees the given allocation by delegating to FreeAtOffset(), which locates
// the matching suballocation by its offset within this block.
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}
10407 
/*
Frees the suballocation with the given offset. Fast paths handle the most
common linear patterns (first live item of the 1st vector, last item of
either vector); otherwise a binary search locates the item in the middle.
Every path ends in CleanupAfterFree() to restore the class invariants.
Asserts if no suballocation with this offset exists.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            // Keep the slot in place as a null item; CleanupAfterFree() may
            // compact the vector later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring-buffer 2nd vector is sorted by increasing offset; double-stack
        // 2nd vector by decreasing offset - hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
10497 
10498 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10499 {
10500  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10501  const size_t suballocCount = AccessSuballocations1st().size();
10502  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10503 }
10504 
/*
Restores the class invariants after one or more suballocations were freed:
- Clears everything if the block became completely empty.
- Grows the run of null items at the beginning of the 1st vector and trims
  null items from the ends of both vectors (and the front of the 2nd one).
- Compacts the 1st vector when null items dominate (see ShouldCompact1st()).
- When the 1st vector drains in ring-buffer mode, swaps the roles of the two
  vectors so the former 2nd vector becomes the new 1st.
Ends with a heavy Validate() in debug configurations.
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            // Move all non-null items to the front, dropping every null slot.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Absorb leading null items of the former 2nd vector into the
                // begin-count of the new 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
10609 
10610 
10612 // class VmaBlockMetadata_Buddy
10613 
// Constructs an empty buddy metadata object. All per-level free lists are
// zeroed; the root node and the usable size are set up later, in Init().
// m_FreeCount starts at 1 for the future root node.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
10623 
// Destroys the node tree starting at the root. DeleteNode is defined
// elsewhere in this file; presumably it recurses into children and is safe
// for a VMA_NULL root - confirm at its definition.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
10628 
10629 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10630 {
10631  VmaBlockMetadata::Init(size);
10632 
10633  m_UsableSize = VmaPrevPow2(size);
10634  m_SumFreeSize = m_UsableSize;
10635 
10636  // Calculate m_LevelCount.
10637  m_LevelCount = 1;
10638  while(m_LevelCount < MAX_LEVELS &&
10639  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10640  {
10641  ++m_LevelCount;
10642  }
10643 
10644  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10645  rootNode->offset = 0;
10646  rootNode->type = Node::TYPE_FREE;
10647  rootNode->parent = VMA_NULL;
10648  rootNode->buddy = VMA_NULL;
10649 
10650  m_Root = rootNode;
10651  AddToFreeListFront(0, rootNode);
10652 }
10653 
// Checks internal consistency of the buddy metadata: the node tree, the
// aggregate counters, and the doubly-linked free list at every level.
// Returns true on success; VMA_VALIDATE reports failure otherwise.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of a non-empty list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last list node must also be the recorded tail.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
10696 
10697 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10698 {
10699  for(uint32_t level = 0; level < m_LevelCount; ++level)
10700  {
10701  if(m_FreeList[level].front != VMA_NULL)
10702  {
10703  return LevelToNodeSize(level);
10704  }
10705  }
10706  return 0;
10707 }
10708 
10709 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10710 {
10711  const VkDeviceSize unusableSize = GetUnusableSize();
10712 
10713  outInfo.blockCount = 1;
10714 
10715  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10716  outInfo.usedBytes = outInfo.unusedBytes = 0;
10717 
10718  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10719  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10720  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10721 
10722  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10723 
10724  if(unusableSize > 0)
10725  {
10726  ++outInfo.unusedRangeCount;
10727  outInfo.unusedBytes += unusableSize;
10728  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10729  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10730  }
10731 }
10732 
10733 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10734 {
10735  const VkDeviceSize unusableSize = GetUnusableSize();
10736 
10737  inoutStats.size += GetSize();
10738  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10739  inoutStats.allocationCount += m_AllocationCount;
10740  inoutStats.unusedRangeCount += m_FreeCount;
10741  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10742 
10743  if(unusableSize > 0)
10744  {
10745  ++inoutStats.unusedRangeCount;
10746  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10747  }
10748 }
10749 
10750 #if VMA_STATS_STRING_ENABLED
10751 
// Writes a JSON description of this block: summary statistics followed by a
// per-node dump of the buddy tree and, when present, the unusable tail
// beyond the power-of-2 usable size.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
10776 
10777 #endif // #if VMA_STATS_STRING_ENABLED
10778 
/*
Finds a free node suitable for an allocation of the given size/alignment.
Lost allocations are not supported by the buddy algorithm, so
currentFrameIndex, frameInUseCount, canMakeOtherLost and strategy are
unused here. On success returns true and fills *pAllocationRequest,
storing the chosen level in customData for use by Alloc().
*/
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search from the best-fitting level upward to the root (level 0) for a
    // free node whose offset satisfies the requested alignment.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->type = VmaAllocationRequestType::Normal;
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Remember the level so Alloc() knows where to start splitting.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
10830 
10831 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10832  uint32_t currentFrameIndex,
10833  uint32_t frameInUseCount,
10834  VmaAllocationRequest* pAllocationRequest)
10835 {
10836  /*
10837  Lost allocations are not supported in buddy allocator at the moment.
10838  Support might be added in the future.
10839  */
10840  return pAllocationRequest->itemsToMakeLostCount == 0;
10841 }
10842 
10843 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10844 {
10845  /*
10846  Lost allocations are not supported in buddy allocator at the moment.
10847  Support might be added in the future.
10848  */
10849  return 0;
10850 }
10851 
// Commits a previously created allocation request: finds the free node chosen
// by CreateAllocationRequest (its level was stashed in request.customData),
// splits nodes downward until the node size matches the requested level, then
// converts the final node into an allocation node holding hAllocation.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    // Buddy metadata only ever produces Normal requests (no upper-address mode).
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // The level at which the free node was found was smuggled through customData.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Pushing rightChild first, then leftChild, leaves leftChild at the
        // front, which the descent below relies on.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One node became two free nodes: net +1 free node.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        // Continue descending from the left child, now at the front of the list.
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    // Only the requested size is subtracted; internal fragmentation (node size
    // minus allocSize) is accounted for elsewhere when computing statistics.
    m_SumFreeSize -= allocSize;
}
10927 
10928 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10929 {
10930  if(node->type == Node::TYPE_SPLIT)
10931  {
10932  DeleteNode(node->split.leftChild->buddy);
10933  DeleteNode(node->split.leftChild);
10934  }
10935 
10936  vma_delete(GetAllocationCallbacks(), node);
10937 }
10938 
10939 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10940 {
10941  VMA_VALIDATE(level < m_LevelCount);
10942  VMA_VALIDATE(curr->parent == parent);
10943  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10944  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10945  switch(curr->type)
10946  {
10947  case Node::TYPE_FREE:
10948  // curr->free.prev, next are validated separately.
10949  ctx.calculatedSumFreeSize += levelNodeSize;
10950  ++ctx.calculatedFreeCount;
10951  break;
10952  case Node::TYPE_ALLOCATION:
10953  ++ctx.calculatedAllocationCount;
10954  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10955  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10956  break;
10957  case Node::TYPE_SPLIT:
10958  {
10959  const uint32_t childrenLevel = level + 1;
10960  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10961  const Node* const leftChild = curr->split.leftChild;
10962  VMA_VALIDATE(leftChild != VMA_NULL);
10963  VMA_VALIDATE(leftChild->offset == curr->offset);
10964  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10965  {
10966  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10967  }
10968  const Node* const rightChild = leftChild->buddy;
10969  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10970  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10971  {
10972  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10973  }
10974  }
10975  break;
10976  default:
10977  return false;
10978  }
10979 
10980  return true;
10981 }
10982 
10983 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10984 {
10985  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10986  uint32_t level = 0;
10987  VkDeviceSize currLevelNodeSize = m_UsableSize;
10988  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10989  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10990  {
10991  ++level;
10992  currLevelNodeSize = nextLevelNodeSize;
10993  nextLevelNodeSize = currLevelNodeSize >> 1;
10994  }
10995  return level;
10996 }
10997 
10998 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10999 {
11000  // Find node and level.
11001  Node* node = m_Root;
11002  VkDeviceSize nodeOffset = 0;
11003  uint32_t level = 0;
11004  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11005  while(node->type == Node::TYPE_SPLIT)
11006  {
11007  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11008  if(offset < nodeOffset + nextLevelSize)
11009  {
11010  node = node->split.leftChild;
11011  }
11012  else
11013  {
11014  node = node->split.leftChild->buddy;
11015  nodeOffset += nextLevelSize;
11016  }
11017  ++level;
11018  levelNodeSize = nextLevelSize;
11019  }
11020 
11021  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11022  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11023 
11024  ++m_FreeCount;
11025  --m_AllocationCount;
11026  m_SumFreeSize += alloc->GetSize();
11027 
11028  node->type = Node::TYPE_FREE;
11029 
11030  // Join free nodes if possible.
11031  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11032  {
11033  RemoveFromFreeList(level, node->buddy);
11034  Node* const parent = node->parent;
11035 
11036  vma_delete(GetAllocationCallbacks(), node->buddy);
11037  vma_delete(GetAllocationCallbacks(), node);
11038  parent->type = Node::TYPE_FREE;
11039 
11040  node = parent;
11041  --level;
11042  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11043  --m_FreeCount;
11044  }
11045 
11046  AddToFreeListFront(level, node);
11047 }
11048 
11049 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11050 {
11051  switch(node->type)
11052  {
11053  case Node::TYPE_FREE:
11054  ++outInfo.unusedRangeCount;
11055  outInfo.unusedBytes += levelNodeSize;
11056  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11057  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
11058  break;
11059  case Node::TYPE_ALLOCATION:
11060  {
11061  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11062  ++outInfo.allocationCount;
11063  outInfo.usedBytes += allocSize;
11064  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11065  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
11066 
11067  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11068  if(unusedRangeSize > 0)
11069  {
11070  ++outInfo.unusedRangeCount;
11071  outInfo.unusedBytes += unusedRangeSize;
11072  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11073  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11074  }
11075  }
11076  break;
11077  case Node::TYPE_SPLIT:
11078  {
11079  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11080  const Node* const leftChild = node->split.leftChild;
11081  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11082  const Node* const rightChild = leftChild->buddy;
11083  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11084  }
11085  break;
11086  default:
11087  VMA_ASSERT(0);
11088  }
11089 }
11090 
11091 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11092 {
11093  VMA_ASSERT(node->type == Node::TYPE_FREE);
11094 
11095  // List is empty.
11096  Node* const frontNode = m_FreeList[level].front;
11097  if(frontNode == VMA_NULL)
11098  {
11099  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11100  node->free.prev = node->free.next = VMA_NULL;
11101  m_FreeList[level].front = m_FreeList[level].back = node;
11102  }
11103  else
11104  {
11105  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11106  node->free.prev = VMA_NULL;
11107  node->free.next = frontNode;
11108  frontNode->free.prev = node;
11109  m_FreeList[level].front = node;
11110  }
11111 }
11112 
11113 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11114 {
11115  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11116 
11117  // It is at the front.
11118  if(node->free.prev == VMA_NULL)
11119  {
11120  VMA_ASSERT(m_FreeList[level].front == node);
11121  m_FreeList[level].front = node->free.next;
11122  }
11123  else
11124  {
11125  Node* const prevFreeNode = node->free.prev;
11126  VMA_ASSERT(prevFreeNode->free.next == node);
11127  prevFreeNode->free.next = node->free.next;
11128  }
11129 
11130  // It is at the back.
11131  if(node->free.next == VMA_NULL)
11132  {
11133  VMA_ASSERT(m_FreeList[level].back == node);
11134  m_FreeList[level].back = node->free.prev;
11135  }
11136  else
11137  {
11138  Node* const nextFreeNode = node->free.next;
11139  VMA_ASSERT(nextFreeNode->free.prev == node);
11140  nextFreeNode->free.prev = node->free.prev;
11141  }
11142 }
11143 
#if VMA_STATS_STRING_ENABLED
// Recursively writes this node's subtree to the JSON stats dump: free nodes as
// unused ranges, allocation nodes as allocations (plus any unused tail inside
// the node), split nodes by descending into both children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            // Report internal fragmentation (node larger than its allocation)
            // as a trailing unused range.
            const VkDeviceSize usedSize = node->allocation.alloc->GetSize();
            if(usedSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + usedSize, levelNodeSize - usedSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize halfNodeSize = levelNodeSize / 2;
            const Node* const left = node->split.leftChild;
            PrintDetailedMapNode(json, left, halfNodeSize);
            PrintDetailedMapNode(json, left->buddy, halfNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
#endif // #if VMA_STATS_STRING_ENABLED
11176 
11177 
11179 // class VmaDeviceMemoryBlock
11180 
// Constructs an empty, uninitialized block; real initialization happens in
// Init(), which is also where m_hParentPool is assigned.
// NOTE(review): m_hParentPool is not set in this initializer list - it appears
// to rely on Init() always being called before use; confirm there is no path
// that reads it on a default-constructed block.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
11190 
11191 void VmaDeviceMemoryBlock::Init(
11192  VmaAllocator hAllocator,
11193  VmaPool hParentPool,
11194  uint32_t newMemoryTypeIndex,
11195  VkDeviceMemory newMemory,
11196  VkDeviceSize newSize,
11197  uint32_t id,
11198  uint32_t algorithm)
11199 {
11200  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11201 
11202  m_hParentPool = hParentPool;
11203  m_MemoryTypeIndex = newMemoryTypeIndex;
11204  m_Id = id;
11205  m_hMemory = newMemory;
11206 
11207  switch(algorithm)
11208  {
11210  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11211  break;
11213  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11214  break;
11215  default:
11216  VMA_ASSERT(0);
11217  // Fall-through.
11218  case 0:
11219  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11220  }
11221  m_pMetadata->Init(newSize);
11222 }
11223 
11224 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11225 {
11226  // This is the most important assert in the entire library.
11227  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11228  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11229 
11230  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11231  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11232  m_hMemory = VK_NULL_HANDLE;
11233 
11234  vma_delete(allocator, m_pMetadata);
11235  m_pMetadata = VMA_NULL;
11236 }
11237 
11238 bool VmaDeviceMemoryBlock::Validate() const
11239 {
11240  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11241  (m_pMetadata->GetSize() != 0));
11242 
11243  return m_pMetadata->Validate();
11244 }
11245 
11246 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11247 {
11248  void* pData = nullptr;
11249  VkResult res = Map(hAllocator, 1, &pData);
11250  if(res != VK_SUCCESS)
11251  {
11252  return res;
11253  }
11254 
11255  res = m_pMetadata->CheckCorruption(pData);
11256 
11257  Unmap(hAllocator, 1);
11258 
11259  return res;
11260 }
11261 
11262 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11263 {
11264  if(count == 0)
11265  {
11266  return VK_SUCCESS;
11267  }
11268 
11269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11270  if(m_MapCount != 0)
11271  {
11272  m_MapCount += count;
11273  VMA_ASSERT(m_pMappedData != VMA_NULL);
11274  if(ppData != VMA_NULL)
11275  {
11276  *ppData = m_pMappedData;
11277  }
11278  return VK_SUCCESS;
11279  }
11280  else
11281  {
11282  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11283  hAllocator->m_hDevice,
11284  m_hMemory,
11285  0, // offset
11286  VK_WHOLE_SIZE,
11287  0, // flags
11288  &m_pMappedData);
11289  if(result == VK_SUCCESS)
11290  {
11291  if(ppData != VMA_NULL)
11292  {
11293  *ppData = m_pMappedData;
11294  }
11295  m_MapCount = count;
11296  }
11297  return result;
11298  }
11299 }
11300 
11301 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11302 {
11303  if(count == 0)
11304  {
11305  return;
11306  }
11307 
11308  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11309  if(m_MapCount >= count)
11310  {
11311  m_MapCount -= count;
11312  if(m_MapCount == 0)
11313  {
11314  m_pMappedData = VMA_NULL;
11315  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11316  }
11317  }
11318  else
11319  {
11320  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11321  }
11322 }
11323 
11324 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11325 {
11326  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11327  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11328 
11329  void* pData;
11330  VkResult res = Map(hAllocator, 1, &pData);
11331  if(res != VK_SUCCESS)
11332  {
11333  return res;
11334  }
11335 
11336  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11337  VmaWriteMagicValue(pData, allocOffset + allocSize);
11338 
11339  Unmap(hAllocator, 1);
11340 
11341  return VK_SUCCESS;
11342 }
11343 
11344 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11345 {
11346  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11347  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11348 
11349  void* pData;
11350  VkResult res = Map(hAllocator, 1, &pData);
11351  if(res != VK_SUCCESS)
11352  {
11353  return res;
11354  }
11355 
11356  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11357  {
11358  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11359  }
11360  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11361  {
11362  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11363  }
11364 
11365  Unmap(hAllocator, 1);
11366 
11367  return VK_SUCCESS;
11368 }
11369 
11370 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11371  const VmaAllocator hAllocator,
11372  const VmaAllocation hAllocation,
11373  VkBuffer hBuffer)
11374 {
11375  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11376  hAllocation->GetBlock() == this);
11377  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11378  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11379  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11380  hAllocator->m_hDevice,
11381  hBuffer,
11382  m_hMemory,
11383  hAllocation->GetOffset());
11384 }
11385 
11386 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11387  const VmaAllocator hAllocator,
11388  const VmaAllocation hAllocation,
11389  VkImage hImage)
11390 {
11391  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11392  hAllocation->GetBlock() == this);
11393  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11394  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11395  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11396  hAllocator->m_hDevice,
11397  hImage,
11398  m_hMemory,
11399  hAllocation->GetOffset());
11400 }
11401 
11402 static void InitStatInfo(VmaStatInfo& outInfo)
11403 {
11404  memset(&outInfo, 0, sizeof(outInfo));
11405  outInfo.allocationSizeMin = UINT64_MAX;
11406  outInfo.unusedRangeSizeMin = UINT64_MAX;
11407 }
11408 
11409 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11410 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11411 {
11412  inoutInfo.blockCount += srcInfo.blockCount;
11413  inoutInfo.allocationCount += srcInfo.allocationCount;
11414  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11415  inoutInfo.usedBytes += srcInfo.usedBytes;
11416  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11417  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11418  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11419  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11420  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11421 }
11422 
11423 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11424 {
11425  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11426  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11427  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11428  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11429 }
11430 
// Constructs a custom pool: all real work is delegated to the embedded
// VmaBlockVector, configured from createInfo. A createInfo.blockSize of 0
// means "use the allocator's preferred block size"; a non-zero value also
// marks the block size as explicit (no automatic 1/8-1/4-1/2 sizing).
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
11450 
// Trivial destructor: the embedded m_BlockVector destroys its blocks itself.
VmaPool_T::~VmaPool_T()
{
}
11454 
11455 #if VMA_STATS_STRING_ENABLED
11456 
11457 #endif // #if VMA_STATS_STRING_ENABLED
11458 
// Constructs an (initially empty) vector of VkDeviceMemory blocks for one
// memory type. Only copies configuration; no Vulkan memory is allocated here
// (see CreateMinBlocks / AllocatePage for that).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
11487 
11488 VmaBlockVector::~VmaBlockVector()
11489 {
11490  for(size_t i = m_Blocks.size(); i--; )
11491  {
11492  m_Blocks[i]->Destroy(m_hAllocator);
11493  vma_delete(m_hAllocator, m_Blocks[i]);
11494  }
11495 }
11496 
11497 VkResult VmaBlockVector::CreateMinBlocks()
11498 {
11499  for(size_t i = 0; i < m_MinBlockCount; ++i)
11500  {
11501  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11502  if(res != VK_SUCCESS)
11503  {
11504  return res;
11505  }
11506  }
11507  return VK_SUCCESS;
11508 }
11509 
11510 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11511 {
11512  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11513 
11514  const size_t blockCount = m_Blocks.size();
11515 
11516  pStats->size = 0;
11517  pStats->unusedSize = 0;
11518  pStats->allocationCount = 0;
11519  pStats->unusedRangeCount = 0;
11520  pStats->unusedRangeSizeMax = 0;
11521  pStats->blockCount = blockCount;
11522 
11523  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11524  {
11525  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11526  VMA_ASSERT(pBlock);
11527  VMA_HEAVY_ASSERT(pBlock->Validate());
11528  pBlock->m_pMetadata->AddPoolStats(*pStats);
11529  }
11530 }
11531 
11532 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11533 {
11534  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11535  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11536  (VMA_DEBUG_MARGIN > 0) &&
11537  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11538  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11539 }
11540 
// Upper bound on retry iterations when allocating with canMakeOtherLost:
// each attempt may make some allocations lost and try again.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11542 
// Allocates 'allocationCount' allocations of identical size/alignment from
// this block vector, writing handles into pAllocations. All-or-nothing: if
// any page fails, every allocation made so far is freed and the output array
// is zeroed. Returns VK_SUCCESS or the error of the failing page.
VkResult VmaBlockVector::Allocate(
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    if(IsCorruptionDetectionEnabled())
    {
        // Round size and alignment up so the magic-value margins stay aligned
        // to the magic value's granularity.
        size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
        alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    }

    // Scope the write lock to the allocation loop only; the rollback below
    // calls Free(), which takes the lock itself.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
        for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                currentFrameIndex,
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if(res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if(res != VK_SUCCESS)
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            Free(pAllocations[allocIndex]);
        }
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
11591 
11592 VkResult VmaBlockVector::AllocatePage(
11593  uint32_t currentFrameIndex,
11594  VkDeviceSize size,
11595  VkDeviceSize alignment,
11596  const VmaAllocationCreateInfo& createInfo,
11597  VmaSuballocationType suballocType,
11598  VmaAllocation* pAllocation)
11599 {
11600  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11601  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11602  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11603  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11604  const bool canCreateNewBlock =
11605  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11606  (m_Blocks.size() < m_MaxBlockCount);
11607  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11608 
11609  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11610  // Which in turn is available only when maxBlockCount = 1.
11611  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11612  {
11613  canMakeOtherLost = false;
11614  }
11615 
11616  // Upper address can only be used with linear allocator and within single memory block.
11617  if(isUpperAddress &&
11618  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11619  {
11620  return VK_ERROR_FEATURE_NOT_PRESENT;
11621  }
11622 
11623  // Validate strategy.
11624  switch(strategy)
11625  {
11626  case 0:
11628  break;
11632  break;
11633  default:
11634  return VK_ERROR_FEATURE_NOT_PRESENT;
11635  }
11636 
11637  // Early reject: requested allocation size is larger that maximum block size for this block vector.
11638  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11639  {
11640  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11641  }
11642 
11643  /*
11644  Under certain condition, this whole section can be skipped for optimization, so
11645  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11646  e.g. for custom pools with linear algorithm.
11647  */
11648  if(!canMakeOtherLost || canCreateNewBlock)
11649  {
11650  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11651  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11653 
11654  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11655  {
11656  // Use only last block.
11657  if(!m_Blocks.empty())
11658  {
11659  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11660  VMA_ASSERT(pCurrBlock);
11661  VkResult res = AllocateFromBlock(
11662  pCurrBlock,
11663  currentFrameIndex,
11664  size,
11665  alignment,
11666  allocFlagsCopy,
11667  createInfo.pUserData,
11668  suballocType,
11669  strategy,
11670  pAllocation);
11671  if(res == VK_SUCCESS)
11672  {
11673  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11674  return VK_SUCCESS;
11675  }
11676  }
11677  }
11678  else
11679  {
11681  {
11682  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11683  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11684  {
11685  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11686  VMA_ASSERT(pCurrBlock);
11687  VkResult res = AllocateFromBlock(
11688  pCurrBlock,
11689  currentFrameIndex,
11690  size,
11691  alignment,
11692  allocFlagsCopy,
11693  createInfo.pUserData,
11694  suballocType,
11695  strategy,
11696  pAllocation);
11697  if(res == VK_SUCCESS)
11698  {
11699  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11700  return VK_SUCCESS;
11701  }
11702  }
11703  }
11704  else // WORST_FIT, FIRST_FIT
11705  {
11706  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11707  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11708  {
11709  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11710  VMA_ASSERT(pCurrBlock);
11711  VkResult res = AllocateFromBlock(
11712  pCurrBlock,
11713  currentFrameIndex,
11714  size,
11715  alignment,
11716  allocFlagsCopy,
11717  createInfo.pUserData,
11718  suballocType,
11719  strategy,
11720  pAllocation);
11721  if(res == VK_SUCCESS)
11722  {
11723  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11724  return VK_SUCCESS;
11725  }
11726  }
11727  }
11728  }
11729 
11730  // 2. Try to create new block.
11731  if(canCreateNewBlock)
11732  {
11733  // Calculate optimal size for new block.
11734  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11735  uint32_t newBlockSizeShift = 0;
11736  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11737 
11738  if(!m_ExplicitBlockSize)
11739  {
11740  // Allocate 1/8, 1/4, 1/2 as first blocks.
11741  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11742  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11743  {
11744  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11745  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11746  {
11747  newBlockSize = smallerNewBlockSize;
11748  ++newBlockSizeShift;
11749  }
11750  else
11751  {
11752  break;
11753  }
11754  }
11755  }
11756 
11757  size_t newBlockIndex = 0;
11758  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11759  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11760  if(!m_ExplicitBlockSize)
11761  {
11762  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11763  {
11764  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11765  if(smallerNewBlockSize >= size)
11766  {
11767  newBlockSize = smallerNewBlockSize;
11768  ++newBlockSizeShift;
11769  res = CreateBlock(newBlockSize, &newBlockIndex);
11770  }
11771  else
11772  {
11773  break;
11774  }
11775  }
11776  }
11777 
11778  if(res == VK_SUCCESS)
11779  {
11780  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11781  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11782 
11783  res = AllocateFromBlock(
11784  pBlock,
11785  currentFrameIndex,
11786  size,
11787  alignment,
11788  allocFlagsCopy,
11789  createInfo.pUserData,
11790  suballocType,
11791  strategy,
11792  pAllocation);
11793  if(res == VK_SUCCESS)
11794  {
11795  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11796  return VK_SUCCESS;
11797  }
11798  else
11799  {
11800  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11801  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11802  }
11803  }
11804  }
11805  }
11806 
11807  // 3. Try to allocate from existing blocks with making other allocations lost.
11808  if(canMakeOtherLost)
11809  {
11810  uint32_t tryIndex = 0;
11811  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11812  {
11813  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11814  VmaAllocationRequest bestRequest = {};
11815  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11816 
11817  // 1. Search existing allocations.
11819  {
11820  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11821  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11822  {
11823  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11824  VMA_ASSERT(pCurrBlock);
11825  VmaAllocationRequest currRequest = {};
11826  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11827  currentFrameIndex,
11828  m_FrameInUseCount,
11829  m_BufferImageGranularity,
11830  size,
11831  alignment,
11832  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11833  suballocType,
11834  canMakeOtherLost,
11835  strategy,
11836  &currRequest))
11837  {
11838  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11839  if(pBestRequestBlock == VMA_NULL ||
11840  currRequestCost < bestRequestCost)
11841  {
11842  pBestRequestBlock = pCurrBlock;
11843  bestRequest = currRequest;
11844  bestRequestCost = currRequestCost;
11845 
11846  if(bestRequestCost == 0)
11847  {
11848  break;
11849  }
11850  }
11851  }
11852  }
11853  }
11854  else // WORST_FIT, FIRST_FIT
11855  {
11856  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11857  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11858  {
11859  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11860  VMA_ASSERT(pCurrBlock);
11861  VmaAllocationRequest currRequest = {};
11862  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11863  currentFrameIndex,
11864  m_FrameInUseCount,
11865  m_BufferImageGranularity,
11866  size,
11867  alignment,
11868  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11869  suballocType,
11870  canMakeOtherLost,
11871  strategy,
11872  &currRequest))
11873  {
11874  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11875  if(pBestRequestBlock == VMA_NULL ||
11876  currRequestCost < bestRequestCost ||
11878  {
11879  pBestRequestBlock = pCurrBlock;
11880  bestRequest = currRequest;
11881  bestRequestCost = currRequestCost;
11882 
11883  if(bestRequestCost == 0 ||
11885  {
11886  break;
11887  }
11888  }
11889  }
11890  }
11891  }
11892 
11893  if(pBestRequestBlock != VMA_NULL)
11894  {
11895  if(mapped)
11896  {
11897  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11898  if(res != VK_SUCCESS)
11899  {
11900  return res;
11901  }
11902  }
11903 
11904  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11905  currentFrameIndex,
11906  m_FrameInUseCount,
11907  &bestRequest))
11908  {
11909  // We no longer have an empty Allocation.
11910  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11911  {
11912  m_HasEmptyBlock = false;
11913  }
11914  // Allocate from this pBlock.
11915  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11916  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11917  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11918  (*pAllocation)->InitBlockAllocation(
11919  pBestRequestBlock,
11920  bestRequest.offset,
11921  alignment,
11922  size,
11923  suballocType,
11924  mapped,
11925  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11926  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11927  VMA_DEBUG_LOG(" Returned from existing block");
11928  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11929  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11930  {
11931  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11932  }
11933  if(IsCorruptionDetectionEnabled())
11934  {
11935  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11936  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11937  }
11938  return VK_SUCCESS;
11939  }
11940  // else: Some allocations must have been touched while we are here. Next try.
11941  }
11942  else
11943  {
11944  // Could not find place in any of the blocks - break outer loop.
11945  break;
11946  }
11947  }
11948  /* Maximum number of tries exceeded - a very unlike event when many other
11949  threads are simultaneously touching allocations making it impossible to make
11950  lost at the same time as we try to allocate. */
11951  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11952  {
11953  return VK_ERROR_TOO_MANY_OBJECTS;
11954  }
11955  }
11956 
11957  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11958 }
11959 
// Returns hAllocation's memory to its owning block. If the block becomes empty
// (or this deallocation leaves a second empty block around), the redundant block
// may be scheduled for destruction — that destruction is deferred until after
// the mutex is released.
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // Check the magic values surrounding the allocation before freeing it,
        // when corruption detection is active.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // A persistently mapped allocation holds one map reference on its block;
        // release it here.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
12027 
12028 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12029 {
12030  VkDeviceSize result = 0;
12031  for(size_t i = m_Blocks.size(); i--; )
12032  {
12033  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12034  if(result >= m_PreferredBlockSize)
12035  {
12036  break;
12037  }
12038  }
12039  return result;
12040 }
12041 
12042 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12043 {
12044  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12045  {
12046  if(m_Blocks[blockIndex] == pBlock)
12047  {
12048  VmaVectorRemove(m_Blocks, blockIndex);
12049  return;
12050  }
12051  }
12052  VMA_ASSERT(0);
12053 }
12054 
12055 void VmaBlockVector::IncrementallySortBlocks()
12056 {
12057  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12058  {
12059  // Bubble sort only until first swap.
12060  for(size_t i = 1; i < m_Blocks.size(); ++i)
12061  {
12062  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12063  {
12064  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12065  return;
12066  }
12067  }
12068  }
12069 }
12070 
// Attempts to suballocate `size` bytes with the given alignment from a single
// block, never making other allocations lost. On success fills *pAllocation and
// returns VK_SUCCESS; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY (or a map
// error if persistent mapping of the block fails).
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Callers that allow making other allocations lost use a different path.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // VMA_ALLOCATION_CREATE_MAPPED_BIT: add one map reference to the block
        // before committing the allocation.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Construct the allocation object, commit it to the block metadata, and
        // bind it to its place inside the block.
        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
        (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Debug feature: fill new memory with a recognizable pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Debug feature: surround the allocation with magic values to detect
        // out-of-bounds writes later.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
12144 
12145 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12146 {
12147  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12148  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12149  allocInfo.allocationSize = blockSize;
12150  VkDeviceMemory mem = VK_NULL_HANDLE;
12151  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12152  if(res < 0)
12153  {
12154  return res;
12155  }
12156 
12157  // New VkDeviceMemory successfully created.
12158 
12159  // Create new Allocation for it.
12160  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12161  pBlock->Init(
12162  m_hAllocator,
12163  m_hParentPool,
12164  m_MemoryTypeIndex,
12165  mem,
12166  allocInfo.allocationSize,
12167  m_NextBlockId++,
12168  m_Algorithm);
12169 
12170  m_Blocks.push_back(pBlock);
12171  if(pNewBlockIndex != VMA_NULL)
12172  {
12173  *pNewBlockIndex = m_Blocks.size() - 1;
12174  }
12175 
12176  return VK_SUCCESS;
12177 }
12178 
// Executes the planned defragmentation moves on the CPU via memmove between
// mapped block memory. Blocks involved in any move are mapped up front (and
// unmapped at the end if they were mapped only for this purpose). For
// non-coherent memory, source ranges are invalidated before reading and
// destination ranges flushed after writing, aligned to nonCoherentAtomSize.
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        // Set when this function mapped the block itself (so it must unmap it).
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    // Per-block scratch state for this pass.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                // Clamp the aligned-up size so the range stays inside the block.
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove, not memcpy: src and dst may be overlapping ranges of the same block.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            // Re-write the corruption-detection magic values around the new location.
            if(IsCorruptionDetectionEnabled())
            {
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
12297 
// Records the planned defragmentation moves into the given command buffer as
// buffer-to-buffer copies. A temporary VkBuffer is created and bound over each
// block touched by a move; these buffers are kept in pDefragCtx for destruction
// later (in DefragmentationEnd), and pDefragCtx->res is set to VK_NOT_READY to
// signal that GPU work is pending.
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo;
        VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                // The buffer spans the entire block so copy offsets can be used directly.
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        pDefragCtx->res = VK_NOT_READY;
    }
}
12369 
12370 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12371 {
12372  m_HasEmptyBlock = false;
12373  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12374  {
12375  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12376  if(pBlock->m_pMetadata->IsEmpty())
12377  {
12378  if(m_Blocks.size() > m_MinBlockCount)
12379  {
12380  if(pDefragmentationStats != VMA_NULL)
12381  {
12382  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12383  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12384  }
12385 
12386  VmaVectorRemove(m_Blocks, blockIndex);
12387  pBlock->Destroy(m_hAllocator);
12388  vma_delete(m_hAllocator, pBlock);
12389  }
12390  else
12391  {
12392  m_HasEmptyBlock = true;
12393  }
12394  }
12395  }
12396 }
12397 
12398 #if VMA_STATS_STRING_ENABLED
12399 
// Writes this block vector's state as a JSON object: pool parameters (for a
// custom pool) or the preferred block size (for a default pool), followed by a
// "Blocks" object with each block's detailed map keyed by block id. Takes the
// vector's mutex for reading.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pools expose their full configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        // Default pool: only the preferred block size is interesting.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by its numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
12462 
12463 #endif // #if VMA_STATS_STRING_ENABLED
12464 
// Defragments this block vector within the given CPU/GPU budgets. Chooses CPU
// (memmove of host-visible memory) or GPU (vkCmdCopyBuffer into commandBuffer)
// defragmentation, runs the algorithm, updates the remaining budgets and
// pStats, and applies the resulting moves. If the mutex is taken here it stays
// locked — DefragmentationEnd() releases it via pCtx->mutexLocked.
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;

    // CPU defragmentation requires host-visible memory; GPU defragmentation is
    // excluded when corruption detection is on, and restricted to memory types
    // enabled in the allocator's GPU-defragmentation mask.
    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        !IsCorruptionDetectionEnabled() &&
        ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        // CPU memmove handles overlapping ranges; GPU buffer copies do not.
        bool overlappingMoveSupported = !defragmentOnGpu;

        // Lock here; unlock happens later in DefragmentationEnd().
        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            // Charge the consumed amount against the budget that was used.
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
12552 
12553 void VmaBlockVector::DefragmentationEnd(
12554  class VmaBlockVectorDefragmentationContext* pCtx,
12555  VmaDefragmentationStats* pStats)
12556 {
12557  // Destroy buffers.
12558  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12559  {
12560  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12561  if(blockCtx.hBuffer)
12562  {
12563  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12564  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12565  }
12566  }
12567 
12568  if(pCtx->res >= VK_SUCCESS)
12569  {
12570  FreeEmptyBlocks(pStats);
12571  }
12572 
12573  if(pCtx->mutexLocked)
12574  {
12575  VMA_ASSERT(m_hAllocator->m_UseMutex);
12576  m_Mutex.UnlockWrite();
12577  }
12578 }
12579 
12580 size_t VmaBlockVector::CalcAllocationCount() const
12581 {
12582  size_t result = 0;
12583  for(size_t i = 0; i < m_Blocks.size(); ++i)
12584  {
12585  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12586  }
12587  return result;
12588 }
12589 
12590 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12591 {
12592  if(m_BufferImageGranularity == 1)
12593  {
12594  return false;
12595  }
12596  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12597  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12598  {
12599  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12600  VMA_ASSERT(m_Algorithm == 0);
12601  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12602  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12603  {
12604  return true;
12605  }
12606  }
12607  return false;
12608 }
12609 
12610 void VmaBlockVector::MakePoolAllocationsLost(
12611  uint32_t currentFrameIndex,
12612  size_t* pLostAllocationCount)
12613 {
12614  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12615  size_t lostAllocationCount = 0;
12616  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12617  {
12618  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12619  VMA_ASSERT(pBlock);
12620  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12621  }
12622  if(pLostAllocationCount != VMA_NULL)
12623  {
12624  *pLostAllocationCount = lostAllocationCount;
12625  }
12626 }
12627 
12628 VkResult VmaBlockVector::CheckCorruption()
12629 {
12630  if(!IsCorruptionDetectionEnabled())
12631  {
12632  return VK_ERROR_FEATURE_NOT_PRESENT;
12633  }
12634 
12635  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12636  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12637  {
12638  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12639  VMA_ASSERT(pBlock);
12640  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12641  if(res != VK_SUCCESS)
12642  {
12643  return res;
12644  }
12645  }
12646  return VK_SUCCESS;
12647 }
12648 
12649 void VmaBlockVector::AddStats(VmaStats* pStats)
12650 {
12651  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12652  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12653 
12654  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12655 
12656  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12657  {
12658  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12659  VMA_ASSERT(pBlock);
12660  VMA_HEAVY_ASSERT(pBlock->Validate());
12661  VmaStatInfo allocationStatInfo;
12662  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12663  VmaAddStatInfo(pStats->total, allocationStatInfo);
12664  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12665  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12666  }
12667 }
12668 
12670 // VmaDefragmentationAlgorithm_Generic members definition
12671 
12672 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12673  VmaAllocator hAllocator,
12674  VmaBlockVector* pBlockVector,
12675  uint32_t currentFrameIndex,
12676  bool overlappingMoveSupported) :
12677  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12678  m_AllocationCount(0),
12679  m_AllAllocations(false),
12680  m_BytesMoved(0),
12681  m_AllocationsMoved(0),
12682  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12683 {
12684  // Create block info for each block.
12685  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12686  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12687  {
12688  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12689  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12690  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12691  m_Blocks.push_back(pBlockInfo);
12692  }
12693 
12694  // Sort them by m_pBlock pointer value.
12695  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12696 }
12697 
12698 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12699 {
12700  for(size_t i = m_Blocks.size(); i--; )
12701  {
12702  vma_delete(m_hAllocator, m_Blocks[i]);
12703  }
12704 }
12705 
12706 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12707 {
12708  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
12709  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12710  {
12711  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12712  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12713  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12714  {
12715  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12716  (*it)->m_Allocations.push_back(allocInfo);
12717  }
12718  else
12719  {
12720  VMA_ASSERT(0);
12721  }
12722 
12723  ++m_AllocationCount;
12724  }
12725 }
12726 
/*
Performs one round of the generic defragmentation algorithm.

Walks allocations from the most "source" block (back of m_Blocks) towards the
front, and for each one tries to find a better place in a preceding block (or
earlier in the same block). Successful relocations are appended to `moves` and
applied immediately to block metadata. Stops early and returns VK_SUCCESS when
either limit (maxBytesToMove / maxAllocationsToMove) would be exceeded.
Allocations that could not be moved stay queued for a possible next round.
*/
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX is a sentinel meaning "not positioned yet in this block"; the
    // inner while below then snaps it to the last allocation index. It relies
    // on SIZE_MAX >= any vector size.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                // Record the move for the caller (who performs the actual data copy).
                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Update metadata immediately: allocate in destination, free in source.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
12873 
12874 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12875 {
12876  size_t result = 0;
12877  for(size_t i = 0; i < m_Blocks.size(); ++i)
12878  {
12879  if(m_Blocks[i]->m_HasNonMovableAllocations)
12880  {
12881  ++result;
12882  }
12883  }
12884  return result;
12885 }
12886 
/*
Entry point of the generic defragmentation algorithm.

Collects allocations to process (all of them when AddAll() was used, otherwise
those registered via AddAllocation()), sorts blocks and allocations into the
order the algorithm wants, then runs a fixed number of DefragmentRound()
passes. Resulting moves are appended to `moves`; limits are shared across
rounds via m_BytesMoved / m_AllocationsMoved.
*/
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing was registered to defragment - trivially done.
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            // "All allocations" mode: enumerate every non-free suballocation
            // straight from the block's metadata.
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // No per-allocation pChanged pointer in this mode.
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}
12941 
12942 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12943  size_t dstBlockIndex, VkDeviceSize dstOffset,
12944  size_t srcBlockIndex, VkDeviceSize srcOffset)
12945 {
12946  if(dstBlockIndex < srcBlockIndex)
12947  {
12948  return true;
12949  }
12950  if(dstBlockIndex > srcBlockIndex)
12951  {
12952  return false;
12953  }
12954  if(dstOffset < srcOffset)
12955  {
12956  return true;
12957  }
12958  return false;
12959 }
12960 
12962 // VmaDefragmentationAlgorithm_Fast
12963 
/*
Constructor of the fast defragmentation algorithm.

The fast algorithm compacts allocations in a single linear sweep, so it is
valid only when all allocations are movable and there is no debug margin -
see VmaBlockVectorDefragmentationContext::Begin() for the selection criteria.
*/
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // The algorithm assumes suballocations are tightly packable; a nonzero
    // debug margin would break its offset arithmetic.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
12980 
// Trivial destructor - all members clean up via their own destructors.
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}
12984 
/*
Fast defragmentation: compacts all allocations in a single sweep.

Blocks are sorted from least free space ("destination") to most free space
("source"). Allocations are then streamed in order and re-packed tightly at
the front of the destination blocks. Gaps that must be skipped (e.g. around
allocations that barely move) are remembered in a FreeSpaceDatabase and can be
filled by later, smaller allocations.

Metadata is stripped of FREE entries by PreprocessMetadata() before the sweep
and rebuilt by PostprocessMetadata() afterwards. Resulting moves (for the
caller to execute as actual memory copies) are appended to `moves`.
*/
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    // Ascending by free size: fullest blocks first become destinations.
    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    // Current write cursor: block (by sorted index) and offset within it.
    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            // Stop the whole sweep when either caller-imposed limit is reached.
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    // Erase + re-insert keeps the suballocation list sorted by offset.
                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        // Leave the allocation where it is; remember the skipped
                        // gap so smaller later allocations may fill it.
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    // Destination block is filled front-to-back, so push_back
                    // keeps its list sorted by offset.
                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
13203 
13204 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13205 {
13206  const size_t blockCount = m_pBlockVector->GetBlockCount();
13207  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13208  {
13209  VmaBlockMetadata_Generic* const pMetadata =
13210  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13211  pMetadata->m_FreeCount = 0;
13212  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13213  pMetadata->m_FreeSuballocationsBySize.clear();
13214  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13215  it != pMetadata->m_Suballocations.end(); )
13216  {
13217  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13218  {
13219  VmaSuballocationList::iterator nextIt = it;
13220  ++nextIt;
13221  pMetadata->m_Suballocations.erase(it);
13222  it = nextIt;
13223  }
13224  else
13225  {
13226  ++it;
13227  }
13228  }
13229  }
13230 }
13231 
13232 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13233 {
13234  const size_t blockCount = m_pBlockVector->GetBlockCount();
13235  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13236  {
13237  VmaBlockMetadata_Generic* const pMetadata =
13238  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13239  const VkDeviceSize blockSize = pMetadata->GetSize();
13240 
13241  // No allocations in this block - entire area is free.
13242  if(pMetadata->m_Suballocations.empty())
13243  {
13244  pMetadata->m_FreeCount = 1;
13245  //pMetadata->m_SumFreeSize is already set to blockSize.
13246  VmaSuballocation suballoc = {
13247  0, // offset
13248  blockSize, // size
13249  VMA_NULL, // hAllocation
13250  VMA_SUBALLOCATION_TYPE_FREE };
13251  pMetadata->m_Suballocations.push_back(suballoc);
13252  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13253  }
13254  // There are some allocations in this block.
13255  else
13256  {
13257  VkDeviceSize offset = 0;
13258  VmaSuballocationList::iterator it;
13259  for(it = pMetadata->m_Suballocations.begin();
13260  it != pMetadata->m_Suballocations.end();
13261  ++it)
13262  {
13263  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13264  VMA_ASSERT(it->offset >= offset);
13265 
13266  // Need to insert preceding free space.
13267  if(it->offset > offset)
13268  {
13269  ++pMetadata->m_FreeCount;
13270  const VkDeviceSize freeSize = it->offset - offset;
13271  VmaSuballocation suballoc = {
13272  offset, // offset
13273  freeSize, // size
13274  VMA_NULL, // hAllocation
13275  VMA_SUBALLOCATION_TYPE_FREE };
13276  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13277  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13278  {
13279  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13280  }
13281  }
13282 
13283  pMetadata->m_SumFreeSize -= it->size;
13284  offset = it->offset + it->size;
13285  }
13286 
13287  // Need to insert trailing free space.
13288  if(offset < blockSize)
13289  {
13290  ++pMetadata->m_FreeCount;
13291  const VkDeviceSize freeSize = blockSize - offset;
13292  VmaSuballocation suballoc = {
13293  offset, // offset
13294  freeSize, // size
13295  VMA_NULL, // hAllocation
13296  VMA_SUBALLOCATION_TYPE_FREE };
13297  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13298  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13299  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13300  {
13301  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13302  }
13303  }
13304 
13305  VMA_SORT(
13306  pMetadata->m_FreeSuballocationsBySize.begin(),
13307  pMetadata->m_FreeSuballocationsBySize.end(),
13308  VmaSuballocationItemSizeLess());
13309  }
13310 
13311  VMA_HEAVY_ASSERT(pMetadata->Validate());
13312  }
13313 }
13314 
13315 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13316 {
13317  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13318  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13319  while(it != pMetadata->m_Suballocations.end())
13320  {
13321  if(it->offset < suballoc.offset)
13322  {
13323  ++it;
13324  }
13325  }
13326  pMetadata->m_Suballocations.insert(it, suballoc);
13327 }
13328 
13330 // VmaBlockVectorDefragmentationContext
13331 
/*
Per-block-vector defragmentation context.

hCustomPool is VK_NULL_HANDLE for a default (per-memory-type) pool. The actual
algorithm object (m_pAlgorithm) is created lazily in Begin(), once it is known
whether all allocations are to be processed.
*/
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
13349 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // Destroys the algorithm created in Begin(). vma_delete is a no-op on VMA_NULL.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
13354 
13355 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13356 {
13357  AllocInfo info = { hAlloc, pChanged };
13358  m_Allocations.push_back(info);
13359 }
13360 
13361 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13362 {
13363  const bool allAllocations = m_AllAllocations ||
13364  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13365 
13366  /********************************
13367  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13368  ********************************/
13369 
13370  /*
13371  Fast algorithm is supported only when certain criteria are met:
13372  - VMA_DEBUG_MARGIN is 0.
13373  - All allocations in this block vector are moveable.
13374  - There is no possibility of image/buffer granularity conflict.
13375  */
13376  if(VMA_DEBUG_MARGIN == 0 &&
13377  allAllocations &&
13378  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13379  {
13380  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13381  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13382  }
13383  else
13384  {
13385  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13386  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13387  }
13388 
13389  if(allAllocations)
13390  {
13391  m_pAlgorithm->AddAll();
13392  }
13393  else
13394  {
13395  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13396  {
13397  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13398  }
13399  }
13400 }
13401 
13403 // VmaDefragmentationContext
13404 
/*
Top-level defragmentation context, returned to the user by
vmaDefragmentationBegin(). Owns one VmaBlockVectorDefragmentationContext per
participating block vector: a fixed-size array for default (per-memory-type)
pools plus a dynamic vector for custom pools.
*/
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Default-pool context pointers start out null; created on demand in AddAllocations().
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
13418 
13419 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13420 {
13421  for(size_t i = m_CustomPoolContexts.size(); i--; )
13422  {
13423  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13424  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13425  vma_delete(m_hAllocator, pBlockVectorCtx);
13426  }
13427  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13428  {
13429  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13430  if(pBlockVectorCtx)
13431  {
13432  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13433  vma_delete(m_hAllocator, pBlockVectorCtx);
13434  }
13435  }
13436 }
13437 
13438 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13439 {
13440  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13441  {
13442  VmaPool pool = pPools[poolIndex];
13443  VMA_ASSERT(pool);
13444  // Pools with algorithm other than default are not defragmented.
13445  if(pool->m_BlockVector.GetAlgorithm() == 0)
13446  {
13447  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13448 
13449  for(size_t i = m_CustomPoolContexts.size(); i--; )
13450  {
13451  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13452  {
13453  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13454  break;
13455  }
13456  }
13457 
13458  if(!pBlockVectorDefragCtx)
13459  {
13460  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13461  m_hAllocator,
13462  pool,
13463  &pool->m_BlockVector,
13464  m_CurrFrameIndex);
13465  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13466  }
13467 
13468  pBlockVectorDefragCtx->AddAll();
13469  }
13470  }
13471 }
13472 
/*
Registers individual allocations for defragmentation.

Each allocation is routed to the per-block-vector context of the pool it lives
in (custom pool or per-memory-type default pool); contexts are created on
demand. Dedicated and lost allocations are silently skipped.
pAllocationsChanged, if provided, is an array parallel to pAllocations whose
entries receive VK_TRUE for allocations that end up moved.
*/
void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentators. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    // Reuse an existing context for this pool if one was created before.
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to default pool.
            else
            {
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            // pBlockVectorDefragCtx stays null for custom pools with a
            // non-default algorithm - such allocations are skipped.
            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}
13541 
/*
Runs defragmentation over all registered block vectors.

CPU limits apply to moves done with memcpy by the library; GPU limits apply to
moves recorded into commandBuffer for the caller to submit. When no command
buffer is given, GPU-side defragmentation is disabled by zeroing its limits.
Aggregated statistics are accumulated into pStats (zeroed first). Returns the
first non-success result reported by any per-block-vector context, processing
default pools before custom pools and stopping early on failure (res < VK_SUCCESS).
*/
VkResult VmaDefragmentationContext_T::Defragment(
    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
{
    if(pStats)
    {
        memset(pStats, 0, sizeof(VmaDefragmentationStats));
    }

    if(commandBuffer == VK_NULL_HANDLE)
    {
        // No command buffer - GPU-side moves are impossible.
        maxGpuBytesToMove = 0;
        maxGpuAllocationsToMove = 0;
    }

    VkResult res = VK_SUCCESS;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                pStats,
                maxCpuBytesToMove, maxCpuAllocationsToMove,
                maxGpuBytesToMove, maxGpuAllocationsToMove,
                commandBuffer);
            if(pBlockVectorCtx->res != VK_SUCCESS)
            {
                res = pBlockVectorCtx->res;
            }
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
        pBlockVectorCtx->GetBlockVector()->Defragment(
            pBlockVectorCtx,
            pStats,
            maxCpuBytesToMove, maxCpuAllocationsToMove,
            maxGpuBytesToMove, maxGpuAllocationsToMove,
            commandBuffer);
        if(pBlockVectorCtx->res != VK_SUCCESS)
        {
            res = pBlockVectorCtx->res;
        }
    }

    return res;
}
13603 
13605 // VmaRecorder
13606 
13607 #if VMA_RECORDING_ENABLED
13608 
// Records VMA API calls to a CSV file (Windows-only; compiled when
// VMA_RECORDING_ENABLED). Members get real values in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
13617 
13618 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13619 {
13620  m_UseMutex = useMutex;
13621  m_Flags = settings.flags;
13622 
13623  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13624  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13625 
13626  // Open file for writing.
13627  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13628  if(err != 0)
13629  {
13630  return VK_ERROR_INITIALIZATION_FAILED;
13631  }
13632 
13633  // Write header.
13634  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13635  fprintf(m_File, "%s\n", "1,5");
13636 
13637  return VK_SUCCESS;
13638 }
13639 
VmaRecorder::~VmaRecorder()
{
    // Close the recording file if Init() succeeded in opening it.
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
13647 
// Writes one CSV line recording a vmaCreateAllocator call.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize writes from multiple threads (lock is optional per m_UseMutex).
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
13657 
// Writes one CSV line recording a vmaDestroyAllocator call.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
13667 
// Writes one CSV line recording a vmaCreatePool call, including the full
// VmaPoolCreateInfo and the resulting pool handle (logged as a pointer).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
13684 
// Writes one CSV line recording a vmaDestroyPool call (pool handle logged as a pointer).
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
13695 
// Writes one CSV line recording a vmaAllocateMemory call: memory requirements,
// creation parameters, resulting allocation handle, and user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // userDataStr converts pUserData to a printable form honoring createInfo.flags.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13720 
13721 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13722  const VkMemoryRequirements& vkMemReq,
13723  const VmaAllocationCreateInfo& createInfo,
13724  uint64_t allocationCount,
13725  const VmaAllocation* pAllocations)
13726 {
13727  CallParams callParams;
13728  GetBasicParams(callParams);
13729 
13730  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13731  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13732  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13733  vkMemReq.size,
13734  vkMemReq.alignment,
13735  vkMemReq.memoryTypeBits,
13736  createInfo.flags,
13737  createInfo.usage,
13738  createInfo.requiredFlags,
13739  createInfo.preferredFlags,
13740  createInfo.memoryTypeBits,
13741  createInfo.pool);
13742  PrintPointerList(allocationCount, pAllocations);
13743  fprintf(m_File, ",%s\n", userDataStr.GetString());
13744  Flush();
13745 }
13746 
13747 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13748  const VkMemoryRequirements& vkMemReq,
13749  bool requiresDedicatedAllocation,
13750  bool prefersDedicatedAllocation,
13751  const VmaAllocationCreateInfo& createInfo,
13752  VmaAllocation allocation)
13753 {
13754  CallParams callParams;
13755  GetBasicParams(callParams);
13756 
13757  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13758  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13759  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13760  vkMemReq.size,
13761  vkMemReq.alignment,
13762  vkMemReq.memoryTypeBits,
13763  requiresDedicatedAllocation ? 1 : 0,
13764  prefersDedicatedAllocation ? 1 : 0,
13765  createInfo.flags,
13766  createInfo.usage,
13767  createInfo.requiredFlags,
13768  createInfo.preferredFlags,
13769  createInfo.memoryTypeBits,
13770  createInfo.pool,
13771  allocation,
13772  userDataStr.GetString());
13773  Flush();
13774 }
13775 
13776 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13777  const VkMemoryRequirements& vkMemReq,
13778  bool requiresDedicatedAllocation,
13779  bool prefersDedicatedAllocation,
13780  const VmaAllocationCreateInfo& createInfo,
13781  VmaAllocation allocation)
13782 {
13783  CallParams callParams;
13784  GetBasicParams(callParams);
13785 
13786  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13787  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13788  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13789  vkMemReq.size,
13790  vkMemReq.alignment,
13791  vkMemReq.memoryTypeBits,
13792  requiresDedicatedAllocation ? 1 : 0,
13793  prefersDedicatedAllocation ? 1 : 0,
13794  createInfo.flags,
13795  createInfo.usage,
13796  createInfo.requiredFlags,
13797  createInfo.preferredFlags,
13798  createInfo.memoryTypeBits,
13799  createInfo.pool,
13800  allocation,
13801  userDataStr.GetString());
13802  Flush();
13803 }
13804 
13805 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13806  VmaAllocation allocation)
13807 {
13808  CallParams callParams;
13809  GetBasicParams(callParams);
13810 
13811  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13812  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13813  allocation);
13814  Flush();
13815 }
13816 
13817 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13818  uint64_t allocationCount,
13819  const VmaAllocation* pAllocations)
13820 {
13821  CallParams callParams;
13822  GetBasicParams(callParams);
13823 
13824  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13825  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13826  PrintPointerList(allocationCount, pAllocations);
13827  fprintf(m_File, "\n");
13828  Flush();
13829 }
13830 
13831 void VmaRecorder::RecordResizeAllocation(
13832  uint32_t frameIndex,
13833  VmaAllocation allocation,
13834  VkDeviceSize newSize)
13835 {
13836  CallParams callParams;
13837  GetBasicParams(callParams);
13838 
13839  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13840  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13841  allocation, newSize);
13842  Flush();
13843 }
13844 
13845 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13846  VmaAllocation allocation,
13847  const void* pUserData)
13848 {
13849  CallParams callParams;
13850  GetBasicParams(callParams);
13851 
13852  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13853  UserDataString userDataStr(
13854  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13855  pUserData);
13856  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13857  allocation,
13858  userDataStr.GetString());
13859  Flush();
13860 }
13861 
13862 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13863  VmaAllocation allocation)
13864 {
13865  CallParams callParams;
13866  GetBasicParams(callParams);
13867 
13868  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13869  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13870  allocation);
13871  Flush();
13872 }
13873 
13874 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13875  VmaAllocation allocation)
13876 {
13877  CallParams callParams;
13878  GetBasicParams(callParams);
13879 
13880  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13881  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13882  allocation);
13883  Flush();
13884 }
13885 
13886 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13887  VmaAllocation allocation)
13888 {
13889  CallParams callParams;
13890  GetBasicParams(callParams);
13891 
13892  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13893  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13894  allocation);
13895  Flush();
13896 }
13897 
13898 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13899  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13900 {
13901  CallParams callParams;
13902  GetBasicParams(callParams);
13903 
13904  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13905  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13906  allocation,
13907  offset,
13908  size);
13909  Flush();
13910 }
13911 
13912 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13913  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13914 {
13915  CallParams callParams;
13916  GetBasicParams(callParams);
13917 
13918  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13919  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13920  allocation,
13921  offset,
13922  size);
13923  Flush();
13924 }
13925 
13926 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13927  const VkBufferCreateInfo& bufCreateInfo,
13928  const VmaAllocationCreateInfo& allocCreateInfo,
13929  VmaAllocation allocation)
13930 {
13931  CallParams callParams;
13932  GetBasicParams(callParams);
13933 
13934  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13935  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13936  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13937  bufCreateInfo.flags,
13938  bufCreateInfo.size,
13939  bufCreateInfo.usage,
13940  bufCreateInfo.sharingMode,
13941  allocCreateInfo.flags,
13942  allocCreateInfo.usage,
13943  allocCreateInfo.requiredFlags,
13944  allocCreateInfo.preferredFlags,
13945  allocCreateInfo.memoryTypeBits,
13946  allocCreateInfo.pool,
13947  allocation,
13948  userDataStr.GetString());
13949  Flush();
13950 }
13951 
13952 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13953  const VkImageCreateInfo& imageCreateInfo,
13954  const VmaAllocationCreateInfo& allocCreateInfo,
13955  VmaAllocation allocation)
13956 {
13957  CallParams callParams;
13958  GetBasicParams(callParams);
13959 
13960  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13961  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13962  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13963  imageCreateInfo.flags,
13964  imageCreateInfo.imageType,
13965  imageCreateInfo.format,
13966  imageCreateInfo.extent.width,
13967  imageCreateInfo.extent.height,
13968  imageCreateInfo.extent.depth,
13969  imageCreateInfo.mipLevels,
13970  imageCreateInfo.arrayLayers,
13971  imageCreateInfo.samples,
13972  imageCreateInfo.tiling,
13973  imageCreateInfo.usage,
13974  imageCreateInfo.sharingMode,
13975  imageCreateInfo.initialLayout,
13976  allocCreateInfo.flags,
13977  allocCreateInfo.usage,
13978  allocCreateInfo.requiredFlags,
13979  allocCreateInfo.preferredFlags,
13980  allocCreateInfo.memoryTypeBits,
13981  allocCreateInfo.pool,
13982  allocation,
13983  userDataStr.GetString());
13984  Flush();
13985 }
13986 
13987 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13988  VmaAllocation allocation)
13989 {
13990  CallParams callParams;
13991  GetBasicParams(callParams);
13992 
13993  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13994  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13995  allocation);
13996  Flush();
13997 }
13998 
13999 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14000  VmaAllocation allocation)
14001 {
14002  CallParams callParams;
14003  GetBasicParams(callParams);
14004 
14005  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14006  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14007  allocation);
14008  Flush();
14009 }
14010 
14011 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14012  VmaAllocation allocation)
14013 {
14014  CallParams callParams;
14015  GetBasicParams(callParams);
14016 
14017  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14018  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14019  allocation);
14020  Flush();
14021 }
14022 
14023 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14024  VmaAllocation allocation)
14025 {
14026  CallParams callParams;
14027  GetBasicParams(callParams);
14028 
14029  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14030  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14031  allocation);
14032  Flush();
14033 }
14034 
14035 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14036  VmaPool pool)
14037 {
14038  CallParams callParams;
14039  GetBasicParams(callParams);
14040 
14041  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14042  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14043  pool);
14044  Flush();
14045 }
14046 
14047 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14048  const VmaDefragmentationInfo2& info,
14050 {
14051  CallParams callParams;
14052  GetBasicParams(callParams);
14053 
14054  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14055  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14056  info.flags);
14057  PrintPointerList(info.allocationCount, info.pAllocations);
14058  fprintf(m_File, ",");
14059  PrintPointerList(info.poolCount, info.pPools);
14060  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14061  info.maxCpuBytesToMove,
14063  info.maxGpuBytesToMove,
14065  info.commandBuffer,
14066  ctx);
14067  Flush();
14068 }
14069 
14070 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14072 {
14073  CallParams callParams;
14074  GetBasicParams(callParams);
14075 
14076  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14077  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14078  ctx);
14079  Flush();
14080 }
14081 
14082 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14083 {
14084  if(pUserData != VMA_NULL)
14085  {
14086  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14087  {
14088  m_Str = (const char*)pUserData;
14089  }
14090  else
14091  {
14092  sprintf_s(m_PtrStr, "%p", pUserData);
14093  m_Str = m_PtrStr;
14094  }
14095  }
14096  else
14097  {
14098  m_Str = "";
14099  }
14100 }
14101 
// Writes the "Config,Begin" ... "Config,End" section at the top of the
// recording file: physical-device identity and limits, memory heaps/types,
// enabled-extension state, and the compile-time VMA_* debug macro values.
// The exact line order is part of the recording file format - do not reorder.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and memory types as reported by the device.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration, so a replay tool can reproduce the setup.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
14147 
14148 void VmaRecorder::GetBasicParams(CallParams& outParams)
14149 {
14150  outParams.threadId = GetCurrentThreadId();
14151 
14152  LARGE_INTEGER counter;
14153  QueryPerformanceCounter(&counter);
14154  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14155 }
14156 
14157 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14158 {
14159  if(count)
14160  {
14161  fprintf(m_File, "%p", pItems[0]);
14162  for(uint64_t i = 1; i < count; ++i)
14163  {
14164  fprintf(m_File, " %p", pItems[i]);
14165  }
14166  }
14167 }
14168 
14169 void VmaRecorder::Flush()
14170 {
14171  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14172  {
14173  fflush(m_File);
14174  }
14175 }
14176 
14177 #endif // #if VMA_RECORDING_ENABLED
14178 
14180 // VmaAllocationObjectAllocator
14181 
// Pool allocator for VmaAllocation_T objects, created in chunks of 1024.
// pAllocationCallbacks: user-provided CPU allocation callbacks (may be null upstream;
// the caller passes the allocator's resolved callbacks here).
VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_Allocator(pAllocationCallbacks, 1024)
{
}
14186 
// Thread-safe allocation of one VmaAllocation_T object from the pool.
VmaAllocation VmaAllocationObjectAllocator::Allocate()
{
    VmaMutexLock mutexLock(m_Mutex);
    return m_Allocator.Alloc();
}
14192 
// Thread-safe return of a VmaAllocation_T object to the pool.
void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
{
    VmaMutexLock mutexLock(m_Mutex);
    m_Allocator.Free(hAlloc);
}
14198 
14200 // VmaAllocator_T
14201 
14202 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14203  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14204  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14205  m_hDevice(pCreateInfo->device),
14206  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14207  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14208  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14209  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14210  m_PreferredLargeHeapBlockSize(0),
14211  m_PhysicalDevice(pCreateInfo->physicalDevice),
14212  m_CurrentFrameIndex(0),
14213  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14214  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14215  m_NextPoolId(0)
14217  ,m_pRecorder(VMA_NULL)
14218 #endif
14219 {
14220  if(VMA_DEBUG_DETECT_CORRUPTION)
14221  {
14222  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14223  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14224  }
14225 
14226  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14227 
14228 #if !(VMA_DEDICATED_ALLOCATION)
14230  {
14231  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14232  }
14233 #endif
14234 
14235  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14236  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14237  memset(&m_MemProps, 0, sizeof(m_MemProps));
14238 
14239  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14240  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14241 
14242  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14243  {
14244  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14245  }
14246 
14247  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14248  {
14249  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14250  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14251  }
14252 
14253  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14254 
14255  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14256  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14257 
14258  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14259  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14260  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14261  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14262 
14263  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14264  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14265 
14266  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14267  {
14268  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14269  {
14270  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14271  if(limit != VK_WHOLE_SIZE)
14272  {
14273  m_HeapSizeLimit[heapIndex] = limit;
14274  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14275  {
14276  m_MemProps.memoryHeaps[heapIndex].size = limit;
14277  }
14278  }
14279  }
14280  }
14281 
14282  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14283  {
14284  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14285 
14286  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14287  this,
14288  VK_NULL_HANDLE, // hParentPool
14289  memTypeIndex,
14290  preferredBlockSize,
14291  0,
14292  SIZE_MAX,
14293  GetBufferImageGranularity(),
14294  pCreateInfo->frameInUseCount,
14295  false, // isCustomPool
14296  false, // explicitBlockSize
14297  false); // linearAlgorithm
14298  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14299  // becase minBlockCount is 0.
14300  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14301 
14302  }
14303 }
14304 
// Second-phase initialization, separate from the constructor so it can fail.
// Currently only sets up call recording when pRecordSettings requests it.
// Returns VK_SUCCESS, a VmaRecorder::Init error, or
// VK_ERROR_FEATURE_NOT_PRESENT when recording was requested but compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            // m_pRecorder stays non-null; the destructor frees it.
            return res;
        }
        // Write the config header first, then the creation call itself.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
14332 
// Destructor: records the destroy call (if recording), then tears down the
// per-memory-type dedicated-allocation lists and block vectors.
// All custom pools must have been destroyed by the user before this point.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Tear down in reverse memory-type order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
        {
            // Leak diagnostic: the user failed to free some dedicated allocations.
            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
        }

        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
14356 
// Resolves the Vulkan entry points VMA uses, in two layers:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take the statically linked
//    functions (and fetch the KHR dedicated-allocation pair via
//    vkGetDeviceProcAddr when that extension is enabled).
// 2. Any non-null pointer in pVulkanFunctions (may itself be null) then
//    overrides the static one.
// Finally asserts every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not statically exported; resolve at runtime.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a user-supplied pointer only when it is non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
14445 
14446 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14447 {
14448  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14449  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14450  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14451  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14452 }
14453 
14454 VkResult VmaAllocator_T::AllocateMemoryOfType(
14455  VkDeviceSize size,
14456  VkDeviceSize alignment,
14457  bool dedicatedAllocation,
14458  VkBuffer dedicatedBuffer,
14459  VkImage dedicatedImage,
14460  const VmaAllocationCreateInfo& createInfo,
14461  uint32_t memTypeIndex,
14462  VmaSuballocationType suballocType,
14463  size_t allocationCount,
14464  VmaAllocation* pAllocations)
14465 {
14466  VMA_ASSERT(pAllocations != VMA_NULL);
14467  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14468 
14469  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14470 
14471  // If memory type is not HOST_VISIBLE, disable MAPPED.
14472  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14473  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14474  {
14475  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14476  }
14477 
14478  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14479  VMA_ASSERT(blockVector);
14480 
14481  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14482  bool preferDedicatedMemory =
14483  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14484  dedicatedAllocation ||
14485  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14486  size > preferredBlockSize / 2;
14487 
14488  if(preferDedicatedMemory &&
14489  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14490  finalCreateInfo.pool == VK_NULL_HANDLE)
14491  {
14493  }
14494 
14495  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14496  {
14497  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14498  {
14499  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14500  }
14501  else
14502  {
14503  return AllocateDedicatedMemory(
14504  size,
14505  suballocType,
14506  memTypeIndex,
14507  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14508  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14509  finalCreateInfo.pUserData,
14510  dedicatedBuffer,
14511  dedicatedImage,
14512  allocationCount,
14513  pAllocations);
14514  }
14515  }
14516  else
14517  {
14518  VkResult res = blockVector->Allocate(
14519  m_CurrentFrameIndex.load(),
14520  size,
14521  alignment,
14522  finalCreateInfo,
14523  suballocType,
14524  allocationCount,
14525  pAllocations);
14526  if(res == VK_SUCCESS)
14527  {
14528  return res;
14529  }
14530 
14531  // 5. Try dedicated memory.
14532  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14533  {
14534  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14535  }
14536  else
14537  {
14538  res = AllocateDedicatedMemory(
14539  size,
14540  suballocType,
14541  memTypeIndex,
14542  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14543  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14544  finalCreateInfo.pUserData,
14545  dedicatedBuffer,
14546  dedicatedImage,
14547  allocationCount,
14548  pAllocations);
14549  if(res == VK_SUCCESS)
14550  {
14551  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14552  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14553  return VK_SUCCESS;
14554  }
14555  else
14556  {
14557  // Everything failed: Return error code.
14558  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14559  return res;
14560  }
14561  }
14562  }
14563 }
14564 
// Creates `allocationCount` dedicated allocations: each one owns its own
// VkDeviceMemory object instead of being suballocated from a larger block.
// All-or-nothing semantics: on success every allocation is registered in
// m_pDedicatedAllocations[memTypeIndex]; if any page fails, all pages created
// so far are freed and pAllocations[0..allocationCount) is zeroed.
//
// size              - size of EACH allocation, in bytes.
// suballocType      - buffer/image/etc. kind, forwarded to each page.
// memTypeIndex      - Vulkan memory type index to allocate from.
// map               - if true, each allocation is persistently mapped.
// isUserDataString  - pUserData points to a string to be copied per allocation.
// pUserData         - user data attached to each created allocation.
// dedicatedBuffer   - buffer handle for VK_KHR_dedicated_allocation, or VK_NULL_HANDLE.
// dedicatedImage    - image handle for VK_KHR_dedicated_allocation, or VK_NULL_HANDLE.
//                     At most one of dedicatedBuffer/dedicatedImage may be non-null.
// allocationCount   - number of allocations to create.
// pAllocations      - out: array of allocationCount handles.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // When VK_KHR_dedicated_allocation is in use and a buffer or image handle
    // was supplied, chain VkMemoryDedicatedAllocateInfoKHR into pNext so the
    // driver knows this memory is dedicated to that resource.
    // NOTE: dedicatedAllocInfo must outlive the AllocateDedicatedMemoryPage
    // calls below, which is why it is declared at function scope.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Create pages one by one, stopping at the first failure. allocIndex is
    // intentionally declared outside the loop: on failure it holds the index
    // of the page that failed, which drives the rollback below.
    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations, iterating downward over the
        // pages that succeeded (indices [0, allocIndex)).
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);

            // Destroy the bookkeeping object in the same order its pieces were
            // initialized in reverse: user data, destructor, pool slot.
            currAlloc->SetUserData(this, VMA_NULL);
            currAlloc->Dtor();
            m_AllocationObjectAllocator.Free(currAlloc);
        }

        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
14665 
14666 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14667  VkDeviceSize size,
14668  VmaSuballocationType suballocType,
14669  uint32_t memTypeIndex,
14670  const VkMemoryAllocateInfo& allocInfo,
14671  bool map,
14672  bool isUserDataString,
14673  void* pUserData,
14674  VmaAllocation* pAllocation)
14675 {
14676  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14677  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14678  if(res < 0)
14679  {
14680  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14681  return res;
14682  }
14683 
14684  void* pMappedData = VMA_NULL;
14685  if(map)
14686  {
14687  res = (*m_VulkanFunctions.vkMapMemory)(
14688  m_hDevice,
14689  hMemory,
14690  0,
14691  VK_WHOLE_SIZE,
14692  0,
14693  &pMappedData);
14694  if(res < 0)
14695  {
14696  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14697  FreeVulkanMemory(memTypeIndex, size, hMemory);
14698  return res;
14699  }
14700  }
14701 
14702  *pAllocation = m_AllocationObjectAllocator.Allocate();
14703  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14704  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14705  (*pAllocation)->SetUserData(this, pUserData);
14706  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14707  {
14708  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14709  }
14710 
14711  return VK_SUCCESS;
14712 }
14713 
14714 void VmaAllocator_T::GetBufferMemoryRequirements(
14715  VkBuffer hBuffer,
14716  VkMemoryRequirements& memReq,
14717  bool& requiresDedicatedAllocation,
14718  bool& prefersDedicatedAllocation) const
14719 {
14720 #if VMA_DEDICATED_ALLOCATION
14721  if(m_UseKhrDedicatedAllocation)
14722  {
14723  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14724  memReqInfo.buffer = hBuffer;
14725 
14726  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14727 
14728  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14729  memReq2.pNext = &memDedicatedReq;
14730 
14731  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14732 
14733  memReq = memReq2.memoryRequirements;
14734  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14735  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14736  }
14737  else
14738 #endif // #if VMA_DEDICATED_ALLOCATION
14739  {
14740  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14741  requiresDedicatedAllocation = false;
14742  prefersDedicatedAllocation = false;
14743  }
14744 }
14745 
14746 void VmaAllocator_T::GetImageMemoryRequirements(
14747  VkImage hImage,
14748  VkMemoryRequirements& memReq,
14749  bool& requiresDedicatedAllocation,
14750  bool& prefersDedicatedAllocation) const
14751 {
14752 #if VMA_DEDICATED_ALLOCATION
14753  if(m_UseKhrDedicatedAllocation)
14754  {
14755  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14756  memReqInfo.image = hImage;
14757 
14758  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14759 
14760  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14761  memReq2.pNext = &memDedicatedReq;
14762 
14763  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14764 
14765  memReq = memReq2.memoryRequirements;
14766  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14767  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14768  }
14769  else
14770 #endif // #if VMA_DEDICATED_ALLOCATION
14771  {
14772  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14773  requiresDedicatedAllocation = false;
14774  prefersDedicatedAllocation = false;
14775  }
14776 }
14777 
14778 VkResult VmaAllocator_T::AllocateMemory(
14779  const VkMemoryRequirements& vkMemReq,
14780  bool requiresDedicatedAllocation,
14781  bool prefersDedicatedAllocation,
14782  VkBuffer dedicatedBuffer,
14783  VkImage dedicatedImage,
14784  const VmaAllocationCreateInfo& createInfo,
14785  VmaSuballocationType suballocType,
14786  size_t allocationCount,
14787  VmaAllocation* pAllocations)
14788 {
14789  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14790 
14791  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14792 
14793  if(vkMemReq.size == 0)
14794  {
14795  return VK_ERROR_VALIDATION_FAILED_EXT;
14796  }
14797  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14798  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14799  {
14800  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14801  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14802  }
14803  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14805  {
14806  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14807  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14808  }
14809  if(requiresDedicatedAllocation)
14810  {
14811  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14812  {
14813  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14814  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14815  }
14816  if(createInfo.pool != VK_NULL_HANDLE)
14817  {
14818  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14819  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14820  }
14821  }
14822  if((createInfo.pool != VK_NULL_HANDLE) &&
14823  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14824  {
14825  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14826  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14827  }
14828 
14829  if(createInfo.pool != VK_NULL_HANDLE)
14830  {
14831  const VkDeviceSize alignmentForPool = VMA_MAX(
14832  vkMemReq.alignment,
14833  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14834 
14835  VmaAllocationCreateInfo createInfoForPool = createInfo;
14836  // If memory type is not HOST_VISIBLE, disable MAPPED.
14837  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14838  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14839  {
14840  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14841  }
14842 
14843  return createInfo.pool->m_BlockVector.Allocate(
14844  m_CurrentFrameIndex.load(),
14845  vkMemReq.size,
14846  alignmentForPool,
14847  createInfoForPool,
14848  suballocType,
14849  allocationCount,
14850  pAllocations);
14851  }
14852  else
14853  {
14854  // Bit mask of memory Vulkan types acceptable for this allocation.
14855  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14856  uint32_t memTypeIndex = UINT32_MAX;
14857  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14858  if(res == VK_SUCCESS)
14859  {
14860  VkDeviceSize alignmentForMemType = VMA_MAX(
14861  vkMemReq.alignment,
14862  GetMemoryTypeMinAlignment(memTypeIndex));
14863 
14864  res = AllocateMemoryOfType(
14865  vkMemReq.size,
14866  alignmentForMemType,
14867  requiresDedicatedAllocation || prefersDedicatedAllocation,
14868  dedicatedBuffer,
14869  dedicatedImage,
14870  createInfo,
14871  memTypeIndex,
14872  suballocType,
14873  allocationCount,
14874  pAllocations);
14875  // Succeeded on first try.
14876  if(res == VK_SUCCESS)
14877  {
14878  return res;
14879  }
14880  // Allocation from this memory type failed. Try other compatible memory types.
14881  else
14882  {
14883  for(;;)
14884  {
14885  // Remove old memTypeIndex from list of possibilities.
14886  memoryTypeBits &= ~(1u << memTypeIndex);
14887  // Find alternative memTypeIndex.
14888  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14889  if(res == VK_SUCCESS)
14890  {
14891  alignmentForMemType = VMA_MAX(
14892  vkMemReq.alignment,
14893  GetMemoryTypeMinAlignment(memTypeIndex));
14894 
14895  res = AllocateMemoryOfType(
14896  vkMemReq.size,
14897  alignmentForMemType,
14898  requiresDedicatedAllocation || prefersDedicatedAllocation,
14899  dedicatedBuffer,
14900  dedicatedImage,
14901  createInfo,
14902  memTypeIndex,
14903  suballocType,
14904  allocationCount,
14905  pAllocations);
14906  // Allocation from this alternative memory type succeeded.
14907  if(res == VK_SUCCESS)
14908  {
14909  return res;
14910  }
14911  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14912  }
14913  // No other matching memory type index could be found.
14914  else
14915  {
14916  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14917  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14918  }
14919  }
14920  }
14921  }
14922  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14923  else
14924  return res;
14925  }
14926 }
14927 
14928 void VmaAllocator_T::FreeMemory(
14929  size_t allocationCount,
14930  const VmaAllocation* pAllocations)
14931 {
14932  VMA_ASSERT(pAllocations);
14933 
14934  for(size_t allocIndex = allocationCount; allocIndex--; )
14935  {
14936  VmaAllocation allocation = pAllocations[allocIndex];
14937 
14938  if(allocation != VK_NULL_HANDLE)
14939  {
14940  if(TouchAllocation(allocation))
14941  {
14942  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14943  {
14944  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14945  }
14946 
14947  switch(allocation->GetType())
14948  {
14949  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14950  {
14951  VmaBlockVector* pBlockVector = VMA_NULL;
14952  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14953  if(hPool != VK_NULL_HANDLE)
14954  {
14955  pBlockVector = &hPool->m_BlockVector;
14956  }
14957  else
14958  {
14959  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14960  pBlockVector = m_pBlockVectors[memTypeIndex];
14961  }
14962  pBlockVector->Free(allocation);
14963  }
14964  break;
14965  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14966  FreeDedicatedMemory(allocation);
14967  break;
14968  default:
14969  VMA_ASSERT(0);
14970  }
14971  }
14972 
14973  allocation->SetUserData(this, VMA_NULL);
14974  allocation->Dtor();
14975  m_AllocationObjectAllocator.Free(allocation);
14976  }
14977  }
14978 }
14979 
14980 VkResult VmaAllocator_T::ResizeAllocation(
14981  const VmaAllocation alloc,
14982  VkDeviceSize newSize)
14983 {
14984  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14985  {
14986  return VK_ERROR_VALIDATION_FAILED_EXT;
14987  }
14988  if(newSize == alloc->GetSize())
14989  {
14990  return VK_SUCCESS;
14991  }
14992 
14993  switch(alloc->GetType())
14994  {
14995  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14996  return VK_ERROR_FEATURE_NOT_PRESENT;
14997  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14998  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14999  {
15000  alloc->ChangeSize(newSize);
15001  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
15002  return VK_SUCCESS;
15003  }
15004  else
15005  {
15006  return VK_ERROR_OUT_OF_POOL_MEMORY;
15007  }
15008  default:
15009  VMA_ASSERT(0);
15010  return VK_ERROR_VALIDATION_FAILED_EXT;
15011  }
15012 }
15013 
15014 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
15015 {
15016  // Initialize.
15017  InitStatInfo(pStats->total);
15018  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15019  InitStatInfo(pStats->memoryType[i]);
15020  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15021  InitStatInfo(pStats->memoryHeap[i]);
15022 
15023  // Process default pools.
15024  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15025  {
15026  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15027  VMA_ASSERT(pBlockVector);
15028  pBlockVector->AddStats(pStats);
15029  }
15030 
15031  // Process custom pools.
15032  {
15033  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15034  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15035  {
15036  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15037  }
15038  }
15039 
15040  // Process dedicated allocations.
15041  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15042  {
15043  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15044  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15045  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15046  VMA_ASSERT(pDedicatedAllocVector);
15047  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15048  {
15049  VmaStatInfo allocationStatInfo;
15050  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15051  VmaAddStatInfo(pStats->total, allocationStatInfo);
15052  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15053  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15054  }
15055  }
15056 
15057  // Postprocess.
15058  VmaPostprocessCalcStatInfo(pStats->total);
15059  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15060  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15061  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15062  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15063 }
15064 
// PCI vendor ID of Advanced Micro Devices, Inc. (4098 == 0x1002).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15066 
15067 VkResult VmaAllocator_T::DefragmentationBegin(
15068  const VmaDefragmentationInfo2& info,
15069  VmaDefragmentationStats* pStats,
15070  VmaDefragmentationContext* pContext)
15071 {
15072  if(info.pAllocationsChanged != VMA_NULL)
15073  {
15074  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15075  }
15076 
15077  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15078  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15079 
15080  (*pContext)->AddPools(info.poolCount, info.pPools);
15081  (*pContext)->AddAllocations(
15083 
15084  VkResult res = (*pContext)->Defragment(
15087  info.commandBuffer, pStats);
15088 
15089  if(res != VK_NOT_READY)
15090  {
15091  vma_delete(this, *pContext);
15092  *pContext = VMA_NULL;
15093  }
15094 
15095  return res;
15096 }
15097 
// Ends a defragmentation operation started by DefragmentationBegin.
// Destroying the context releases all resources it holds.
VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    vma_delete(this, context);
    return VK_SUCCESS;
}
15104 
// Fills *pAllocationInfo with the current state of hAllocation and "touches"
// the allocation - bumps its last-use frame index to the current frame via a
// lock-free compare-exchange loop, so allocations that can become lost are
// kept alive by being queried.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report size and user data only;
                // no backing memory exists any more.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report live state.
                // pMappedData is VMA_NULL because lost-capable allocations
                // cannot be persistently mapped.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; on CAS failure
                // localLastUseFrameIndex was reloaded - loop and re-examine.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-only bookkeeping: advance last-use frame index so statistics
        // reflect recent usage. A non-lost-capable allocation can never be in
        // the LOST state, hence the assert.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
15176 
// "Touches" the allocation: advances its last-use frame index to the current
// frame using the same lock-free compare-exchange loop as GetAllocationInfo.
// Returns false if the allocation is lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; on CAS failure
                // localLastUseFrameIndex was reloaded - loop and re-examine.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-only bookkeeping: a non-lost-capable allocation can never be
        // in the LOST state, hence the assert.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
15228 
15229 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15230 {
15231  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15232 
15233  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15234 
15235  if(newCreateInfo.maxBlockCount == 0)
15236  {
15237  newCreateInfo.maxBlockCount = SIZE_MAX;
15238  }
15239  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15240  {
15241  return VK_ERROR_INITIALIZATION_FAILED;
15242  }
15243 
15244  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15245 
15246  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15247 
15248  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15249  if(res != VK_SUCCESS)
15250  {
15251  vma_delete(this, *pPool);
15252  *pPool = VMA_NULL;
15253  return res;
15254  }
15255 
15256  // Add to m_Pools.
15257  {
15258  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15259  (*pPool)->SetId(m_NextPoolId++);
15260  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15261  }
15262 
15263  return VK_SUCCESS;
15264 }
15265 
15266 void VmaAllocator_T::DestroyPool(VmaPool pool)
15267 {
15268  // Remove from m_Pools.
15269  {
15270  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15271  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15272  VMA_ASSERT(success && "Pool not found in Allocator.");
15273  }
15274 
15275  vma_delete(this, pool);
15276 }
15277 
// Retrieves statistics of a custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
15282 
// Atomically publishes the application's current frame index, used by the
// lost-allocation machinery (see TouchAllocation / GetAllocationInfo).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
15287 
// Marks eligible allocations in the given pool as lost, relative to the
// current frame index. The number of allocations made lost is returned via
// pLostAllocationCount (may be null - handled by the block vector).
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
15296 
// Checks magic-number margins in a custom pool's blocks for corruption.
// Delegates to the pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
15301 
15302 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15303 {
15304  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15305 
15306  // Process default pools.
15307  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15308  {
15309  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15310  {
15311  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15312  VMA_ASSERT(pBlockVector);
15313  VkResult localRes = pBlockVector->CheckCorruption();
15314  switch(localRes)
15315  {
15316  case VK_ERROR_FEATURE_NOT_PRESENT:
15317  break;
15318  case VK_SUCCESS:
15319  finalRes = VK_SUCCESS;
15320  break;
15321  default:
15322  return localRes;
15323  }
15324  }
15325  }
15326 
15327  // Process custom pools.
15328  {
15329  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15330  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15331  {
15332  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15333  {
15334  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15335  switch(localRes)
15336  {
15337  case VK_ERROR_FEATURE_NOT_PRESENT:
15338  break;
15339  case VK_SUCCESS:
15340  finalRes = VK_SUCCESS;
15341  break;
15342  default:
15343  return localRes;
15344  }
15345  }
15346  }
15347  }
15348 
15349  return finalRes;
15350 }
15351 
// Creates an allocation object that is permanently in the "lost" state -
// it has no backing memory and its frame index is VMA_FRAME_INDEX_LOST.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = m_AllocationObjectAllocator.Allocate();
    (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
15358 
15359 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15360 {
15361  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15362 
15363  VkResult res;
15364  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15365  {
15366  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15367  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15368  {
15369  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15370  if(res == VK_SUCCESS)
15371  {
15372  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15373  }
15374  }
15375  else
15376  {
15377  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15378  }
15379  }
15380  else
15381  {
15382  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15383  }
15384 
15385  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15386  {
15387  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15388  }
15389 
15390  return res;
15391 }
15392 
// Thin wrapper over vkFreeMemory that invokes the user's pfnFree callback
// (before freeing) and returns the freed bytes to the per-heap budget when a
// heap size limit is enforced.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Give the bytes back to the heap's remaining budget, if limited.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
15409 
15410 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15411 {
15412  if(hAllocation->CanBecomeLost())
15413  {
15414  return VK_ERROR_MEMORY_MAP_FAILED;
15415  }
15416 
15417  switch(hAllocation->GetType())
15418  {
15419  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15420  {
15421  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15422  char *pBytes = VMA_NULL;
15423  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15424  if(res == VK_SUCCESS)
15425  {
15426  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15427  hAllocation->BlockAllocMap();
15428  }
15429  return res;
15430  }
15431  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15432  return hAllocation->DedicatedAllocMap(this, ppData);
15433  default:
15434  VMA_ASSERT(0);
15435  return VK_ERROR_MEMORY_MAP_FAILED;
15436  }
15437 }
15438 
15439 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15440 {
15441  switch(hAllocation->GetType())
15442  {
15443  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15444  {
15445  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15446  hAllocation->BlockAllocUnmap();
15447  pBlock->Unmap(this, 1);
15448  }
15449  break;
15450  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15451  hAllocation->DedicatedAllocUnmap(this);
15452  break;
15453  default:
15454  VMA_ASSERT(0);
15455  }
15456 }
15457 
15458 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15459 {
15460  VkResult res = VK_SUCCESS;
15461  switch(hAllocation->GetType())
15462  {
15463  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15464  res = GetVulkanFunctions().vkBindBufferMemory(
15465  m_hDevice,
15466  hBuffer,
15467  hAllocation->GetMemory(),
15468  0); //memoryOffset
15469  break;
15470  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15471  {
15472  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15473  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15474  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15475  break;
15476  }
15477  default:
15478  VMA_ASSERT(0);
15479  }
15480  return res;
15481 }
15482 
15483 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15484 {
15485  VkResult res = VK_SUCCESS;
15486  switch(hAllocation->GetType())
15487  {
15488  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15489  res = GetVulkanFunctions().vkBindImageMemory(
15490  m_hDevice,
15491  hImage,
15492  hAllocation->GetMemory(),
15493  0); //memoryOffset
15494  break;
15495  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15496  {
15497  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15498  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15499  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15500  break;
15501  }
15502  default:
15503  VMA_ASSERT(0);
15504  }
15505  return res;
15506 }
15507 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates the host-cache range [offset, offset+size) of the
    // allocation. offset/size are relative to the allocation's start;
    // size == VK_WHOLE_SIZE means "to the end of the allocation".
    // No-op for coherent memory types or when size == 0.
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // VkMappedMemoryRange offset/size must be aligned to nonCoherentAtomSize.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Allocation owns the whole VkDeviceMemory: align the start down,
            // grow the size accordingly, and clamp to the allocation's end.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            // Aligned size covers the requested bytes plus the alignment slack
            // introduced by aligning the start down.
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate from allocation-relative to block-relative coordinates
            // and clamp to the block's end.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
15583 
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    // Frees a dedicated allocation: unregisters it from the per-memory-type
    // bookkeeping vector, then releases its VkDeviceMemory.
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Scope confines the write lock to the vector manipulation only.
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
15613 
15614 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15615 {
15616  VkBufferCreateInfo dummyBufCreateInfo;
15617  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15618 
15619  uint32_t memoryTypeBits = 0;
15620 
15621  // Create buffer.
15622  VkBuffer buf = VK_NULL_HANDLE;
15623  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15624  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15625  if(res == VK_SUCCESS)
15626  {
15627  // Query for supported memory types.
15628  VkMemoryRequirements memReq;
15629  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15630  memoryTypeBits = memReq.memoryTypeBits;
15631 
15632  // Destroy buffer.
15633  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15634  }
15635 
15636  return memoryTypeBits;
15637 }
15638 
void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    // Debug helper: fills the allocation's memory with `pattern` so that use
    // of uninitialized or freed memory is easier to spot. Applies only when
    // VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, the allocation cannot
    // become lost, and its memory type is HOST_VISIBLE (mappable).
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        !hAllocation->CanBecomeLost() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            // Flush so the pattern reaches memory even on non-coherent types.
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}
15659 
uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
{
    // Lazily computed value cached in an atomic; UINT32_MAX serves as the
    // "not yet computed" sentinel. NOTE(review): if two threads race past the
    // sentinel check, both recompute and store — presumably the computation is
    // deterministic for a given device, making the redundant store harmless;
    // confirm against CalculateGpuDefragmentationMemoryTypeBits().
    uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    if(memoryTypeBits == UINT32_MAX)
    {
        memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
        m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    }
    return memoryTypeBits;
}
15670 
15671 #if VMA_STATS_STRING_ENABLED
15672 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Emits the detailed-map portion of the stats JSON: dedicated allocations,
    // default per-memory-type block vectors, and custom pools. Assumes the
    // caller has already opened the enclosing JSON object.

    // "DedicatedAllocations": one array per non-empty memory type.
    // The object is opened lazily on the first non-empty type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools": detailed map of each non-empty default block vector,
    // again with the enclosing object opened lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Pools are keyed by their numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
15758 
15759 #endif // #if VMA_STATS_STRING_ENABLED
15760 
15762 // Public interface
15763 
15764 VkResult vmaCreateAllocator(
15765  const VmaAllocatorCreateInfo* pCreateInfo,
15766  VmaAllocator* pAllocator)
15767 {
15768  VMA_ASSERT(pCreateInfo && pAllocator);
15769  VMA_DEBUG_LOG("vmaCreateAllocator");
15770  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15771  return (*pAllocator)->Init(pCreateInfo);
15772 }
15773 
15774 void vmaDestroyAllocator(
15775  VmaAllocator allocator)
15776 {
15777  if(allocator != VK_NULL_HANDLE)
15778  {
15779  VMA_DEBUG_LOG("vmaDestroyAllocator");
15780  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15781  vma_delete(&allocationCallbacks, allocator);
15782  }
15783 }
15784 
15786  VmaAllocator allocator,
15787  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15788 {
15789  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15790  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15791 }
15792 
15794  VmaAllocator allocator,
15795  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15796 {
15797  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15798  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15799 }
15800 
15802  VmaAllocator allocator,
15803  uint32_t memoryTypeIndex,
15804  VkMemoryPropertyFlags* pFlags)
15805 {
15806  VMA_ASSERT(allocator && pFlags);
15807  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15808  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15809 }
15810 
15812  VmaAllocator allocator,
15813  uint32_t frameIndex)
15814 {
15815  VMA_ASSERT(allocator);
15816  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15817 
15818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15819 
15820  allocator->SetCurrentFrameIndex(frameIndex);
15821 }
15822 
15823 void vmaCalculateStats(
15824  VmaAllocator allocator,
15825  VmaStats* pStats)
15826 {
15827  VMA_ASSERT(allocator && pStats);
15828  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15829  allocator->CalculateStats(pStats);
15830 }
15831 
15832 #if VMA_STATS_STRING_ENABLED
15833 
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a JSON description of the allocator's current state: total stats,
    // per-heap and per-type sections, and (optionally) the detailed map.
    // The returned string is allocated with the allocator's callbacks and must
    // be released by the caller via vmaFreeStatsString().
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One "Heap N" object per memory heap: size, flags, stats, and the
        // memory types that belong to this heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Stats are only emitted for heaps that actually have blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Property flags spelled out as human-readable strings.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built text into a caller-owned, NUL-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
15941 
15942 void vmaFreeStatsString(
15943  VmaAllocator allocator,
15944  char* pStatsString)
15945 {
15946  if(pStatsString != VMA_NULL)
15947  {
15948  VMA_ASSERT(allocator);
15949  size_t len = strlen(pStatsString);
15950  vma_delete_array(allocator, pStatsString, len + 1);
15951  }
15952 }
15953 
15954 #endif // #if VMA_STATS_STRING_ENABLED
15955 
15956 /*
15957 This function is not protected by any mutex because it just reads immutable data.
15958 */
15959 VkResult vmaFindMemoryTypeIndex(
15960  VmaAllocator allocator,
15961  uint32_t memoryTypeBits,
15962  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15963  uint32_t* pMemoryTypeIndex)
15964 {
15965  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15966  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15967  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15968 
15969  if(pAllocationCreateInfo->memoryTypeBits != 0)
15970  {
15971  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15972  }
15973 
15974  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15975  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15976 
15977  // Convert usage to requiredFlags and preferredFlags.
15978  switch(pAllocationCreateInfo->usage)
15979  {
15981  break;
15983  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15984  {
15985  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15986  }
15987  break;
15989  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15990  break;
15992  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15993  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15994  {
15995  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15996  }
15997  break;
15999  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16000  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
16001  break;
16002  default:
16003  break;
16004  }
16005 
16006  *pMemoryTypeIndex = UINT32_MAX;
16007  uint32_t minCost = UINT32_MAX;
16008  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16009  memTypeIndex < allocator->GetMemoryTypeCount();
16010  ++memTypeIndex, memTypeBit <<= 1)
16011  {
16012  // This memory type is acceptable according to memoryTypeBits bitmask.
16013  if((memTypeBit & memoryTypeBits) != 0)
16014  {
16015  const VkMemoryPropertyFlags currFlags =
16016  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16017  // This memory type contains requiredFlags.
16018  if((requiredFlags & ~currFlags) == 0)
16019  {
16020  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16021  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
16022  // Remember memory type with lowest cost.
16023  if(currCost < minCost)
16024  {
16025  *pMemoryTypeIndex = memTypeIndex;
16026  if(currCost == 0)
16027  {
16028  return VK_SUCCESS;
16029  }
16030  minCost = currCost;
16031  }
16032  }
16033  }
16034  }
16035  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16036 }
16037 
16039  VmaAllocator allocator,
16040  const VkBufferCreateInfo* pBufferCreateInfo,
16041  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16042  uint32_t* pMemoryTypeIndex)
16043 {
16044  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16045  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16046  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16047  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16048 
16049  const VkDevice hDev = allocator->m_hDevice;
16050  VkBuffer hBuffer = VK_NULL_HANDLE;
16051  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16052  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16053  if(res == VK_SUCCESS)
16054  {
16055  VkMemoryRequirements memReq = {};
16056  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16057  hDev, hBuffer, &memReq);
16058 
16059  res = vmaFindMemoryTypeIndex(
16060  allocator,
16061  memReq.memoryTypeBits,
16062  pAllocationCreateInfo,
16063  pMemoryTypeIndex);
16064 
16065  allocator->GetVulkanFunctions().vkDestroyBuffer(
16066  hDev, hBuffer, allocator->GetAllocationCallbacks());
16067  }
16068  return res;
16069 }
16070 
16072  VmaAllocator allocator,
16073  const VkImageCreateInfo* pImageCreateInfo,
16074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16075  uint32_t* pMemoryTypeIndex)
16076 {
16077  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16078  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16079  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16080  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16081 
16082  const VkDevice hDev = allocator->m_hDevice;
16083  VkImage hImage = VK_NULL_HANDLE;
16084  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16085  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16086  if(res == VK_SUCCESS)
16087  {
16088  VkMemoryRequirements memReq = {};
16089  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16090  hDev, hImage, &memReq);
16091 
16092  res = vmaFindMemoryTypeIndex(
16093  allocator,
16094  memReq.memoryTypeBits,
16095  pAllocationCreateInfo,
16096  pMemoryTypeIndex);
16097 
16098  allocator->GetVulkanFunctions().vkDestroyImage(
16099  hDev, hImage, allocator->GetAllocationCallbacks());
16100  }
16101  return res;
16102 }
16103 
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    // Creates a custom memory pool and, when the recording feature is compiled
    // in and active, records the call.
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
16126 
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    // Destroys a custom memory pool. A null pool handle is accepted and
    // ignored. Recording (if enabled) happens before actual destruction.
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
16151 
16152 void vmaGetPoolStats(
16153  VmaAllocator allocator,
16154  VmaPool pool,
16155  VmaPoolStats* pPoolStats)
16156 {
16157  VMA_ASSERT(allocator && pool && pPoolStats);
16158 
16159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16160 
16161  allocator->GetPoolStats(pool, pPoolStats);
16162 }
16163 
16165  VmaAllocator allocator,
16166  VmaPool pool,
16167  size_t* pLostAllocationCount)
16168 {
16169  VMA_ASSERT(allocator && pool);
16170 
16171  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16172 
16173 #if VMA_RECORDING_ENABLED
16174  if(allocator->GetRecorder() != VMA_NULL)
16175  {
16176  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16177  }
16178 #endif
16179 
16180  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16181 }
16182 
16183 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16184 {
16185  VMA_ASSERT(allocator && pool);
16186 
16187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16188 
16189  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16190 
16191  return allocator->CheckPoolCorruption(pool);
16192 }
16193 
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    // General-purpose allocation from raw VkMemoryRequirements, with no
    // dedicated-allocation hints. pAllocationInfo is optional; when provided
    // and the allocation succeeds, it is filled with the allocation's details.
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        1, // allocationCount
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
16236 
VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo)
{
    // Batch variant of vmaAllocateMemory: makes allocationCount allocations
    // that all share the same requirements and create info. pAllocationInfo,
    // if non-null, must point to an array of allocationCount elements.
    if(allocationCount == 0)
    {
        // Nothing to do; trivially successful.
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);

    VMA_DEBUG_LOG("vmaAllocateMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        allocationCount,
        pAllocations);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryPages(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        for(size_t i = 0; i < allocationCount; ++i)
        {
            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
        }
    }

    return result;
}
16289 
16291  VmaAllocator allocator,
16292  VkBuffer buffer,
16293  const VmaAllocationCreateInfo* pCreateInfo,
16294  VmaAllocation* pAllocation,
16295  VmaAllocationInfo* pAllocationInfo)
16296 {
16297  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16298 
16299  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16300 
16301  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16302 
16303  VkMemoryRequirements vkMemReq = {};
16304  bool requiresDedicatedAllocation = false;
16305  bool prefersDedicatedAllocation = false;
16306  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16307  requiresDedicatedAllocation,
16308  prefersDedicatedAllocation);
16309 
16310  VkResult result = allocator->AllocateMemory(
16311  vkMemReq,
16312  requiresDedicatedAllocation,
16313  prefersDedicatedAllocation,
16314  buffer, // dedicatedBuffer
16315  VK_NULL_HANDLE, // dedicatedImage
16316  *pCreateInfo,
16317  VMA_SUBALLOCATION_TYPE_BUFFER,
16318  1, // allocationCount
16319  pAllocation);
16320 
16321 #if VMA_RECORDING_ENABLED
16322  if(allocator->GetRecorder() != VMA_NULL)
16323  {
16324  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16325  allocator->GetCurrentFrameIndex(),
16326  vkMemReq,
16327  requiresDedicatedAllocation,
16328  prefersDedicatedAllocation,
16329  *pCreateInfo,
16330  *pAllocation);
16331  }
16332 #endif
16333 
16334  if(pAllocationInfo && result == VK_SUCCESS)
16335  {
16336  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16337  }
16338 
16339  return result;
16340 }
16341 
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    // Allocates memory suitable for the given existing image, honoring the
    // driver's dedicated-allocation requirements/preferences. Does NOT bind
    // the image. pAllocationInfo is optional.
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        1, // allocationCount
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
16392 
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    // Frees a single allocation. A null allocation handle is accepted and
    // ignored. Recording (if enabled) happens before the actual free.
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(
        1, // allocationCount
        &allocation);
}
16421 
void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    // Batch variant of vmaFreeMemory: frees allocationCount allocations from
    // the pAllocations array. A count of zero is accepted and ignored.
    if(allocationCount == 0)
    {
        return;
    }

    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaFreeMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemoryPages(
            allocator->GetCurrentFrameIndex(),
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    allocator->FreeMemory(allocationCount, pAllocations);
}
16450 
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    // Attempts to change the size of an existing allocation in place; the
    // result comes from the allocator's ResizeAllocation.
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaResizeAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordResizeAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation,
            newSize);
    }
#endif

    return allocator->ResizeAllocation(allocation, newSize);
}
16474 
16476  VmaAllocator allocator,
16477  VmaAllocation allocation,
16478  VmaAllocationInfo* pAllocationInfo)
16479 {
16480  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16481 
16482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16483 
16484 #if VMA_RECORDING_ENABLED
16485  if(allocator->GetRecorder() != VMA_NULL)
16486  {
16487  allocator->GetRecorder()->RecordGetAllocationInfo(
16488  allocator->GetCurrentFrameIndex(),
16489  allocation);
16490  }
16491 #endif
16492 
16493  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16494 }
16495 
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    // Touches the allocation (updates its last-use frame via the allocator)
    // and returns whether it is still valid, per TouchAllocation's result.
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
16515 
16517  VmaAllocator allocator,
16518  VmaAllocation allocation,
16519  void* pUserData)
16520 {
16521  VMA_ASSERT(allocator && allocation);
16522 
16523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16524 
16525  allocation->SetUserData(allocator, pUserData);
16526 
16527 #if VMA_RECORDING_ENABLED
16528  if(allocator->GetRecorder() != VMA_NULL)
16529  {
16530  allocator->GetRecorder()->RecordSetAllocationUserData(
16531  allocator->GetCurrentFrameIndex(),
16532  allocation,
16533  pUserData);
16534  }
16535 #endif
16536 }
16537 
16539  VmaAllocator allocator,
16540  VmaAllocation* pAllocation)
16541 {
16542  VMA_ASSERT(allocator && pAllocation);
16543 
16544  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16545 
16546  allocator->CreateLostAllocation(pAllocation);
16547 
16548 #if VMA_RECORDING_ENABLED
16549  if(allocator->GetRecorder() != VMA_NULL)
16550  {
16551  allocator->GetRecorder()->RecordCreateLostAllocation(
16552  allocator->GetCurrentFrameIndex(),
16553  *pAllocation);
16554  }
16555 #endif
16556 }
16557 
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    // Maps the allocation's memory and stores a CPU pointer to its data in
    // *ppData. Must be paired with vmaUnmapMemory.
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
16580 
16581 void vmaUnmapMemory(
16582  VmaAllocator allocator,
16583  VmaAllocation allocation)
16584 {
16585  VMA_ASSERT(allocator && allocation);
16586 
16587  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16588 
16589 #if VMA_RECORDING_ENABLED
16590  if(allocator->GetRecorder() != VMA_NULL)
16591  {
16592  allocator->GetRecorder()->RecordUnmapMemory(
16593  allocator->GetCurrentFrameIndex(),
16594  allocation);
16595  }
16596 #endif
16597 
16598  allocator->Unmap(allocation);
16599 }
16600 
16601 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16602 {
16603  VMA_ASSERT(allocator && allocation);
16604 
16605  VMA_DEBUG_LOG("vmaFlushAllocation");
16606 
16607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16608 
16609  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16610 
16611 #if VMA_RECORDING_ENABLED
16612  if(allocator->GetRecorder() != VMA_NULL)
16613  {
16614  allocator->GetRecorder()->RecordFlushAllocation(
16615  allocator->GetCurrentFrameIndex(),
16616  allocation, offset, size);
16617  }
16618 #endif
16619 }
16620 
16621 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16622 {
16623  VMA_ASSERT(allocator && allocation);
16624 
16625  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16626 
16627  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16628 
16629  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16630 
16631 #if VMA_RECORDING_ENABLED
16632  if(allocator->GetRecorder() != VMA_NULL)
16633  {
16634  allocator->GetRecorder()->RecordInvalidateAllocation(
16635  allocator->GetCurrentFrameIndex(),
16636  allocation, offset, size);
16637  }
16638 #endif
16639 }
16640 
// Checks magic numbers in margins around allocations in the memory types
// selected by `memoryTypeBits`. Thin wrapper: after validation, logging and
// the optional global debug mutex, all work is delegated to the allocator
// object, whose result is returned unchanged.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
16651 
16652 VkResult vmaDefragment(
16653  VmaAllocator allocator,
16654  VmaAllocation* pAllocations,
16655  size_t allocationCount,
16656  VkBool32* pAllocationsChanged,
16657  const VmaDefragmentationInfo *pDefragmentationInfo,
16658  VmaDefragmentationStats* pDefragmentationStats)
16659 {
16660  // Deprecated interface, reimplemented using new one.
16661 
16662  VmaDefragmentationInfo2 info2 = {};
16663  info2.allocationCount = (uint32_t)allocationCount;
16664  info2.pAllocations = pAllocations;
16665  info2.pAllocationsChanged = pAllocationsChanged;
16666  if(pDefragmentationInfo != VMA_NULL)
16667  {
16668  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16669  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16670  }
16671  else
16672  {
16673  info2.maxCpuAllocationsToMove = UINT32_MAX;
16674  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16675  }
16676  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16677 
16679  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16680  if(res == VK_NOT_READY)
16681  {
16682  res = vmaDefragmentationEnd( allocator, ctx);
16683  }
16684  return res;
16685 }
16686 
16687 VkResult vmaDefragmentationBegin(
16688  VmaAllocator allocator,
16689  const VmaDefragmentationInfo2* pInfo,
16690  VmaDefragmentationStats* pStats,
16691  VmaDefragmentationContext *pContext)
16692 {
16693  VMA_ASSERT(allocator && pInfo && pContext);
16694 
16695  // Degenerate case: Nothing to defragment.
16696  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16697  {
16698  return VK_SUCCESS;
16699  }
16700 
16701  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16702  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16703  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16704  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16705 
16706  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16707 
16708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16709 
16710  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16711 
16712 #if VMA_RECORDING_ENABLED
16713  if(allocator->GetRecorder() != VMA_NULL)
16714  {
16715  allocator->GetRecorder()->RecordDefragmentationBegin(
16716  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16717  }
16718 #endif
16719 
16720  return res;
16721 }
16722 
16723 VkResult vmaDefragmentationEnd(
16724  VmaAllocator allocator,
16725  VmaDefragmentationContext context)
16726 {
16727  VMA_ASSERT(allocator);
16728 
16729  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16730 
16731  if(context != VK_NULL_HANDLE)
16732  {
16733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16734 
16735 #if VMA_RECORDING_ENABLED
16736  if(allocator->GetRecorder() != VMA_NULL)
16737  {
16738  allocator->GetRecorder()->RecordDefragmentationEnd(
16739  allocator->GetCurrentFrameIndex(), context);
16740  }
16741 #endif
16742 
16743  return allocator->DefragmentationEnd(context);
16744  }
16745  else
16746  {
16747  return VK_SUCCESS;
16748  }
16749 }
16750 
// Binds the given buffer to the memory of the given allocation.
// Thin wrapper: after argument validation, logging and the optional global
// debug mutex, all work is delegated to the allocator object.
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}
16764 
// Binds the given image to the memory of the given allocation.
// Thin wrapper, parallel to vmaBindBufferMemory(): validation, logging and
// the optional global debug mutex, then delegation to the allocator object.
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
16778 
/** \brief Creates a VkBuffer, allocates memory for it and optionally binds them together.

Steps: (1) vkCreateBuffer, (2) query memory requirements, (3) allocate memory
through the allocator, (4) bind buffer to memory (unless
VMA_ALLOCATION_CREATE_DONT_BIND_BIT is set). If any step fails, everything
created so far is destroyed/freed and the outputs are reset to
VK_NULL_HANDLE before the error code is returned.

\param allocator Allocator object.
\param pBufferCreateInfo Buffer parameters; size must be nonzero or
       VK_ERROR_VALIDATION_FAILED_EXT is returned.
\param pAllocationCreateInfo Memory allocation parameters.
\param[out] pBuffer Created buffer, or VK_NULL_HANDLE on failure.
\param[out] pAllocation Created allocation, or VK_NULL_HANDLE on failure.
\param[out] pAllocationInfo Optional; filled on full success.
*/
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Reject zero-size buffers early.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Clear outputs up front so every failure path leaves null handles.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recorded regardless of the allocation result, as in the original.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: free the allocation, then destroy the buffer.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the buffer created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
16891 
16892 void vmaDestroyBuffer(
16893  VmaAllocator allocator,
16894  VkBuffer buffer,
16895  VmaAllocation allocation)
16896 {
16897  VMA_ASSERT(allocator);
16898 
16899  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16900  {
16901  return;
16902  }
16903 
16904  VMA_DEBUG_LOG("vmaDestroyBuffer");
16905 
16906  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16907 
16908 #if VMA_RECORDING_ENABLED
16909  if(allocator->GetRecorder() != VMA_NULL)
16910  {
16911  allocator->GetRecorder()->RecordDestroyBuffer(
16912  allocator->GetCurrentFrameIndex(),
16913  allocation);
16914  }
16915 #endif
16916 
16917  if(buffer != VK_NULL_HANDLE)
16918  {
16919  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16920  }
16921 
16922  if(allocation != VK_NULL_HANDLE)
16923  {
16924  allocator->FreeMemory(
16925  1, // allocationCount
16926  &allocation);
16927  }
16928 }
16929 
/** \brief Creates a VkImage, allocates memory for it and optionally binds them together.

Parallel to vmaCreateBuffer(): (1) vkCreateImage, (2) allocate memory through
the allocator using the image's memory requirements, (3) bind image to memory
(unless VMA_ALLOCATION_CREATE_DONT_BIND_BIT is set). On failure, everything
created so far is destroyed/freed and outputs are reset to VK_NULL_HANDLE.

\param allocator Allocator object.
\param pImageCreateInfo Image parameters; zero extent, mipLevels or
       arrayLayers yields VK_ERROR_VALIDATION_FAILED_EXT.
\param pAllocationCreateInfo Memory allocation parameters.
\param[out] pImage Created image, or VK_NULL_HANDLE on failure.
\param[out] pAllocation Created allocation, or VK_NULL_HANDLE on failure.
\param[out] pAllocationInfo Optional; filled on full success.
*/
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Reject degenerate image dimensions early.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Clear outputs up front so every failure path leaves null handles.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Suballocation type depends on tiling: optimal vs. linear images
        // are tracked separately.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recorded regardless of the allocation result, as in the original.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, *pImage);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: free the allocation, then destroy the image.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the image created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
17031 
17032 void vmaDestroyImage(
17033  VmaAllocator allocator,
17034  VkImage image,
17035  VmaAllocation allocation)
17036 {
17037  VMA_ASSERT(allocator);
17038 
17039  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17040  {
17041  return;
17042  }
17043 
17044  VMA_DEBUG_LOG("vmaDestroyImage");
17045 
17046  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17047 
17048 #if VMA_RECORDING_ENABLED
17049  if(allocator->GetRecorder() != VMA_NULL)
17050  {
17051  allocator->GetRecorder()->RecordDestroyImage(
17052  allocator->GetCurrentFrameIndex(),
17053  allocation);
17054  }
17055 #endif
17056 
17057  if(image != VK_NULL_HANDLE)
17058  {
17059  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17060  }
17061  if(allocation != VK_NULL_HANDLE)
17062  {
17063  allocator->FreeMemory(
17064  1, // allocationCount
17065  &allocation);
17066  }
17067 }
17068 
17069 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1786
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2086
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1844
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:2897
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1818
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2417
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1798
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2048
Definition: vk_mem_alloc.h:2152
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:2850
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1790
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2517
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1841
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2933
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2306
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1685
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2398
Definition: vk_mem_alloc.h:2123
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:2853
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1779
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2205
Definition: vk_mem_alloc.h:2075
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1853
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2334
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
Definition: vk_mem_alloc.h:1907
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1838
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2079
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1979
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1795
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:2887
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1978
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2937
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1870
VmaStatInfo total
Definition: vk_mem_alloc.h:1988
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2945
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2189
Definition: vk_mem_alloc.h:2147
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2928
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1796
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1721
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1847
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2348
Definition: vk_mem_alloc.h:2342
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1802
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1914
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2527
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1791
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1816
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2226
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2368
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2404
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1777
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2351
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2902
VmaMemoryUsage
Definition: vk_mem_alloc.h:2026
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2862
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2923
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:2941
Definition: vk_mem_alloc.h:2065
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2213
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1794
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1984
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1727
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2841
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2839
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2868
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1748
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1820
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1753
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2943
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2200
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2414
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1787
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1967
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2363
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1740
Definition: vk_mem_alloc.h:2338
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2130
An opaque object that represents a started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1980
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1744
Definition: vk_mem_alloc.h:2163
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2354
Definition: vk_mem_alloc.h:2074
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1793
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2195
Definition: vk_mem_alloc.h:2186
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1970
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1789
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2376
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1856
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2407
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2184
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2892
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2219
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1895
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1986
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2110
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1979
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1800
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1826
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:2838
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2916
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1742
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1799
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2390
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1792
Definition: vk_mem_alloc.h:2141
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1834
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2541
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:1850
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1979
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1976
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2395
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2847
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
Definition: vk_mem_alloc.h:2156
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2522
Definition: vk_mem_alloc.h:2170
Definition: vk_mem_alloc.h:2182
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2939
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1785
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1974
Definition: vk_mem_alloc.h:2031
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2344
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1823
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1972
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1797
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1801
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2097
Definition: vk_mem_alloc.h:2177
Definition: vk_mem_alloc.h:2058
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2536
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1775
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1788
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2323
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2503
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2167
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2288
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1980
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1810
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1987
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2401
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1980
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:2907
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2508
uint32_t poolCount
Number of pools in the pPools array.
Definition: vk_mem_alloc.h:2871