Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1685 /*
1686 Define this macro to 0/1 to disable/enable support for recording functionality,
1687 available through VmaAllocatorCreateInfo::pRecordSettings.
1688 */
1689 #ifndef VMA_RECORDING_ENABLED
1690  #define VMA_RECORDING_ENABLED 0
1691 #endif
1692 
1693 #ifndef NOMINMAX
1694  #define NOMINMAX // For windows.h
1695 #endif
1696 
1697 #ifndef VULKAN_H_
1698  #include <vulkan/vulkan.h>
1699 #endif
1700 
1701 #if VMA_RECORDING_ENABLED
1702  #include <windows.h>
1703 #endif
1704 
1705 #if !defined(VMA_DEDICATED_ALLOCATION)
1706  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1707  #define VMA_DEDICATED_ALLOCATION 1
1708  #else
1709  #define VMA_DEDICATED_ALLOCATION 0
1710  #endif
1711 #endif
1712 
1713 #if !defined(VMA_BIND_MEMORY2)
1714  #if VK_KHR_bind_memory2
1715  #define VMA_BIND_MEMORY2 1
1716  #else
1717  #define VMA_BIND_MEMORY2 0
1718  #endif
1719 #endif
1720 
1730 VK_DEFINE_HANDLE(VmaAllocator)
1731 
1732 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1734  VmaAllocator allocator,
1735  uint32_t memoryType,
1736  VkDeviceMemory memory,
1737  VkDeviceSize size);
1739 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1740  VmaAllocator allocator,
1741  uint32_t memoryType,
1742  VkDeviceMemory memory,
1743  VkDeviceSize size);
1744 
1758 
1800 
1803 typedef VkFlags VmaAllocatorCreateFlags;
1804 
1809 typedef struct VmaVulkanFunctions {
1810  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1811  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1812  PFN_vkAllocateMemory vkAllocateMemory;
1813  PFN_vkFreeMemory vkFreeMemory;
1814  PFN_vkMapMemory vkMapMemory;
1815  PFN_vkUnmapMemory vkUnmapMemory;
1816  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1817  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1818  PFN_vkBindBufferMemory vkBindBufferMemory;
1819  PFN_vkBindImageMemory vkBindImageMemory;
1820  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1821  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1822  PFN_vkCreateBuffer vkCreateBuffer;
1823  PFN_vkDestroyBuffer vkDestroyBuffer;
1824  PFN_vkCreateImage vkCreateImage;
1825  PFN_vkDestroyImage vkDestroyImage;
1826  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1827 #if VMA_DEDICATED_ALLOCATION
1828  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1829  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1830 #endif
1831 #if VMA_BIND_MEMORY2
1832  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
1833  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
1834 #endif
1836 
1838 typedef enum VmaRecordFlagBits {
1845 
1848 typedef VkFlags VmaRecordFlags;
1849 
1851 typedef struct VmaRecordSettings
1852 {
1862  const char* pFilePath;
1864 
1867 {
1871 
1872  VkPhysicalDevice physicalDevice;
1874 
1875  VkDevice device;
1877 
1880 
1881  const VkAllocationCallbacks* pAllocationCallbacks;
1883 
1923  const VkDeviceSize* pHeapSizeLimit;
1944 
1946 VkResult vmaCreateAllocator(
1947  const VmaAllocatorCreateInfo* pCreateInfo,
1948  VmaAllocator* pAllocator);
1949 
1951 void vmaDestroyAllocator(
1952  VmaAllocator allocator);
1953 
1959  VmaAllocator allocator,
1960  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1961 
1967  VmaAllocator allocator,
1968  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1969 
1977  VmaAllocator allocator,
1978  uint32_t memoryTypeIndex,
1979  VkMemoryPropertyFlags* pFlags);
1980 
1990  VmaAllocator allocator,
1991  uint32_t frameIndex);
1992 
1995 typedef struct VmaStatInfo
1996 {
1998  uint32_t blockCount;
2004  VkDeviceSize usedBytes;
2006  VkDeviceSize unusedBytes;
2009 } VmaStatInfo;
2010 
2012 typedef struct VmaStats
2013 {
2014  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2015  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2017 } VmaStats;
2018 
2020 void vmaCalculateStats(
2021  VmaAllocator allocator,
2022  VmaStats* pStats);
2023 
2024 #ifndef VMA_STATS_STRING_ENABLED
2025 #define VMA_STATS_STRING_ENABLED 1
2026 #endif
2027 
2028 #if VMA_STATS_STRING_ENABLED
2029 
2031 
2033 void vmaBuildStatsString(
2034  VmaAllocator allocator,
2035  char** ppStatsString,
2036  VkBool32 detailedMap);
2037 
2038 void vmaFreeStatsString(
2039  VmaAllocator allocator,
2040  char* pStatsString);
2041 
2042 #endif // #if VMA_STATS_STRING_ENABLED
2043 
2052 VK_DEFINE_HANDLE(VmaPool)
2053 
2054 typedef enum VmaMemoryUsage
2055 {
2104 } VmaMemoryUsage;
2105 
2115 
2176 
2192 
2202 
2209 
2213 
2215 {
2228  VkMemoryPropertyFlags requiredFlags;
2233  VkMemoryPropertyFlags preferredFlags;
2241  uint32_t memoryTypeBits;
2254  void* pUserData;
2256 
2273 VkResult vmaFindMemoryTypeIndex(
2274  VmaAllocator allocator,
2275  uint32_t memoryTypeBits,
2276  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2277  uint32_t* pMemoryTypeIndex);
2278 
2292  VmaAllocator allocator,
2293  const VkBufferCreateInfo* pBufferCreateInfo,
2294  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2295  uint32_t* pMemoryTypeIndex);
2296 
2310  VmaAllocator allocator,
2311  const VkImageCreateInfo* pImageCreateInfo,
2312  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2313  uint32_t* pMemoryTypeIndex);
2314 
2335 
2352 
2363 
2369 
2372 typedef VkFlags VmaPoolCreateFlags;
2373 
2376 typedef struct VmaPoolCreateInfo {
2391  VkDeviceSize blockSize;
2420 
2423 typedef struct VmaPoolStats {
2426  VkDeviceSize size;
2429  VkDeviceSize unusedSize;
2442  VkDeviceSize unusedRangeSizeMax;
2445  size_t blockCount;
2446 } VmaPoolStats;
2447 
2454 VkResult vmaCreatePool(
2455  VmaAllocator allocator,
2456  const VmaPoolCreateInfo* pCreateInfo,
2457  VmaPool* pPool);
2458 
2461 void vmaDestroyPool(
2462  VmaAllocator allocator,
2463  VmaPool pool);
2464 
2471 void vmaGetPoolStats(
2472  VmaAllocator allocator,
2473  VmaPool pool,
2474  VmaPoolStats* pPoolStats);
2475 
2483  VmaAllocator allocator,
2484  VmaPool pool,
2485  size_t* pLostAllocationCount);
2486 
2501 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2502 
2527 VK_DEFINE_HANDLE(VmaAllocation)
2528 
2529 
2531 typedef struct VmaAllocationInfo {
2536  uint32_t memoryType;
2545  VkDeviceMemory deviceMemory;
2550  VkDeviceSize offset;
2555  VkDeviceSize size;
2569  void* pUserData;
2571 
2582 VkResult vmaAllocateMemory(
2583  VmaAllocator allocator,
2584  const VkMemoryRequirements* pVkMemoryRequirements,
2585  const VmaAllocationCreateInfo* pCreateInfo,
2586  VmaAllocation* pAllocation,
2587  VmaAllocationInfo* pAllocationInfo);
2588 
2608 VkResult vmaAllocateMemoryPages(
2609  VmaAllocator allocator,
2610  const VkMemoryRequirements* pVkMemoryRequirements,
2611  const VmaAllocationCreateInfo* pCreateInfo,
2612  size_t allocationCount,
2613  VmaAllocation* pAllocations,
2614  VmaAllocationInfo* pAllocationInfo);
2615 
2623  VmaAllocator allocator,
2624  VkBuffer buffer,
2625  const VmaAllocationCreateInfo* pCreateInfo,
2626  VmaAllocation* pAllocation,
2627  VmaAllocationInfo* pAllocationInfo);
2628 
2630 VkResult vmaAllocateMemoryForImage(
2631  VmaAllocator allocator,
2632  VkImage image,
2633  const VmaAllocationCreateInfo* pCreateInfo,
2634  VmaAllocation* pAllocation,
2635  VmaAllocationInfo* pAllocationInfo);
2636 
2641 void vmaFreeMemory(
2642  VmaAllocator allocator,
2643  VmaAllocation allocation);
2644 
2655 void vmaFreeMemoryPages(
2656  VmaAllocator allocator,
2657  size_t allocationCount,
2658  VmaAllocation* pAllocations);
2659 
2666 VkResult vmaResizeAllocation(
2667  VmaAllocator allocator,
2668  VmaAllocation allocation,
2669  VkDeviceSize newSize);
2670 
2688  VmaAllocator allocator,
2689  VmaAllocation allocation,
2690  VmaAllocationInfo* pAllocationInfo);
2691 
2706 VkBool32 vmaTouchAllocation(
2707  VmaAllocator allocator,
2708  VmaAllocation allocation);
2709 
2724  VmaAllocator allocator,
2725  VmaAllocation allocation,
2726  void* pUserData);
2727 
2739  VmaAllocator allocator,
2740  VmaAllocation* pAllocation);
2741 
2776 VkResult vmaMapMemory(
2777  VmaAllocator allocator,
2778  VmaAllocation allocation,
2779  void** ppData);
2780 
2785 void vmaUnmapMemory(
2786  VmaAllocator allocator,
2787  VmaAllocation allocation);
2788 
2805 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2806 
2823 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2824 
2841 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2842 
2849 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2850 
2851 typedef enum VmaDefragmentationFlagBits {
2855 typedef VkFlags VmaDefragmentationFlags;
2856 
2861 typedef struct VmaDefragmentationInfo2 {
2885  uint32_t poolCount;
2906  VkDeviceSize maxCpuBytesToMove;
2916  VkDeviceSize maxGpuBytesToMove;
2930  VkCommandBuffer commandBuffer;
2932 
2937 typedef struct VmaDefragmentationInfo {
2942  VkDeviceSize maxBytesToMove;
2949 
2951 typedef struct VmaDefragmentationStats {
2953  VkDeviceSize bytesMoved;
2955  VkDeviceSize bytesFreed;
2961 
2991 VkResult vmaDefragmentationBegin(
2992  VmaAllocator allocator,
2993  const VmaDefragmentationInfo2* pInfo,
2994  VmaDefragmentationStats* pStats,
2995  VmaDefragmentationContext *pContext);
2996 
3002 VkResult vmaDefragmentationEnd(
3003  VmaAllocator allocator,
3004  VmaDefragmentationContext context);
3005 
3046 VkResult vmaDefragment(
3047  VmaAllocator allocator,
3048  VmaAllocation* pAllocations,
3049  size_t allocationCount,
3050  VkBool32* pAllocationsChanged,
3051  const VmaDefragmentationInfo *pDefragmentationInfo,
3052  VmaDefragmentationStats* pDefragmentationStats);
3053 
3066 VkResult vmaBindBufferMemory(
3067  VmaAllocator allocator,
3068  VmaAllocation allocation,
3069  VkBuffer buffer);
3070 
3081 VkResult vmaBindBufferMemory2(
3082  VmaAllocator allocator,
3083  VmaAllocation allocation,
3084  VkDeviceSize allocationLocalOffset,
3085  VkBuffer buffer,
3086  const void* pNext);
3087 
3100 VkResult vmaBindImageMemory(
3101  VmaAllocator allocator,
3102  VmaAllocation allocation,
3103  VkImage image);
3104 
3115 VkResult vmaBindImageMemory2(
3116  VmaAllocator allocator,
3117  VmaAllocation allocation,
3118  VkDeviceSize allocationLocalOffset,
3119  VkImage image,
3120  const void* pNext);
3121 
3148 VkResult vmaCreateBuffer(
3149  VmaAllocator allocator,
3150  const VkBufferCreateInfo* pBufferCreateInfo,
3151  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3152  VkBuffer* pBuffer,
3153  VmaAllocation* pAllocation,
3154  VmaAllocationInfo* pAllocationInfo);
3155 
3167 void vmaDestroyBuffer(
3168  VmaAllocator allocator,
3169  VkBuffer buffer,
3170  VmaAllocation allocation);
3171 
3173 VkResult vmaCreateImage(
3174  VmaAllocator allocator,
3175  const VkImageCreateInfo* pImageCreateInfo,
3176  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3177  VkImage* pImage,
3178  VmaAllocation* pAllocation,
3179  VmaAllocationInfo* pAllocationInfo);
3180 
3192 void vmaDestroyImage(
3193  VmaAllocator allocator,
3194  VkImage image,
3195  VmaAllocation allocation);
3196 
3197 #ifdef __cplusplus
3198 }
3199 #endif
3200 
3201 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3202 
3203 // For Visual Studio IntelliSense.
3204 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3205 #define VMA_IMPLEMENTATION
3206 #endif
3207 
3208 #ifdef VMA_IMPLEMENTATION
3209 #undef VMA_IMPLEMENTATION
3210 
3211 #include <cstdint>
3212 #include <cstdlib>
3213 #include <cstring>
3214 
3215 /*******************************************************************************
3216 CONFIGURATION SECTION
3217 
3218 Define some of these macros before each #include of this header or change them
3219 here if you need other then default behavior depending on your environment.
3220 */
3221 
3222 /*
3223 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3224 internally, like:
3225 
3226  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3227 
3228 Define to 0 if you are going to provide you own pointers to Vulkan functions via
3229 VmaAllocatorCreateInfo::pVulkanFunctions.
3230 */
3231 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3232 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3233 #endif
3234 
3235 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3236 //#define VMA_USE_STL_CONTAINERS 1
3237 
3238 /* Set this macro to 1 to make the library including and using STL containers:
3239 std::pair, std::vector, std::list, std::unordered_map.
3240 
3241 Set it to 0 or undefined to make the library using its own implementation of
3242 the containers.
3243 */
3244 #if VMA_USE_STL_CONTAINERS
3245  #define VMA_USE_STL_VECTOR 1
3246  #define VMA_USE_STL_UNORDERED_MAP 1
3247  #define VMA_USE_STL_LIST 1
3248 #endif
3249 
3250 #ifndef VMA_USE_STL_SHARED_MUTEX
3251  // Compiler conforms to C++17.
3252  #if __cplusplus >= 201703L
3253  #define VMA_USE_STL_SHARED_MUTEX 1
3254  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3255  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3256  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3257  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3258  #define VMA_USE_STL_SHARED_MUTEX 1
3259  #else
3260  #define VMA_USE_STL_SHARED_MUTEX 0
3261  #endif
3262 #endif
3263 
3264 /*
3265 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3266 Library has its own container implementation.
3267 */
3268 #if VMA_USE_STL_VECTOR
3269  #include <vector>
3270 #endif
3271 
3272 #if VMA_USE_STL_UNORDERED_MAP
3273  #include <unordered_map>
3274 #endif
3275 
3276 #if VMA_USE_STL_LIST
3277  #include <list>
3278 #endif
3279 
3280 /*
3281 Following headers are used in this CONFIGURATION section only, so feel free to
3282 remove them if not needed.
3283 */
3284 #include <cassert> // for assert
3285 #include <algorithm> // for min, max
3286 #include <mutex>
3287 
3288 #ifndef VMA_NULL
3289  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3290  #define VMA_NULL nullptr
3291 #endif
3292 
3293 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3294 #include <cstdlib>
3295 void *aligned_alloc(size_t alignment, size_t size)
3296 {
3297  // alignment must be >= sizeof(void*)
3298  if(alignment < sizeof(void*))
3299  {
3300  alignment = sizeof(void*);
3301  }
3302 
3303  return memalign(alignment, size);
3304 }
3305 #elif defined(__APPLE__) || defined(__ANDROID__)
3306 #include <cstdlib>
3307 void *aligned_alloc(size_t alignment, size_t size)
3308 {
3309  // alignment must be >= sizeof(void*)
3310  if(alignment < sizeof(void*))
3311  {
3312  alignment = sizeof(void*);
3313  }
3314 
3315  void *pointer;
3316  if(posix_memalign(&pointer, alignment, size) == 0)
3317  return pointer;
3318  return VMA_NULL;
3319 }
3320 #endif
3321 
3322 // If your compiler is not compatible with C++11 and definition of
3323 // aligned_alloc() function is missing, uncommeting following line may help:
3324 
3325 //#include <malloc.h>
3326 
3327 // Normal assert to check for programmer's errors, especially in Debug configuration.
3328 #ifndef VMA_ASSERT
3329  #ifdef _DEBUG
3330  #define VMA_ASSERT(expr) assert(expr)
3331  #else
3332  #define VMA_ASSERT(expr)
3333  #endif
3334 #endif
3335 
3336 // Assert that will be called very often, like inside data structures e.g. operator[].
3337 // Making it non-empty can make program slow.
3338 #ifndef VMA_HEAVY_ASSERT
3339  #ifdef _DEBUG
3340  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3341  #else
3342  #define VMA_HEAVY_ASSERT(expr)
3343  #endif
3344 #endif
3345 
3346 #ifndef VMA_ALIGN_OF
3347  #define VMA_ALIGN_OF(type) (__alignof(type))
3348 #endif
3349 
3350 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3351  #if defined(_WIN32)
3352  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3353  #else
3354  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3355  #endif
3356 #endif
3357 
3358 #ifndef VMA_SYSTEM_FREE
3359  #if defined(_WIN32)
3360  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3361  #else
3362  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3363  #endif
3364 #endif
3365 
3366 #ifndef VMA_MIN
3367  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3368 #endif
3369 
3370 #ifndef VMA_MAX
3371  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3372 #endif
3373 
3374 #ifndef VMA_SWAP
3375  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3376 #endif
3377 
3378 #ifndef VMA_SORT
3379  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3380 #endif
3381 
3382 #ifndef VMA_DEBUG_LOG
3383  #define VMA_DEBUG_LOG(format, ...)
3384  /*
3385  #define VMA_DEBUG_LOG(format, ...) do { \
3386  printf(format, __VA_ARGS__); \
3387  printf("\n"); \
3388  } while(false)
3389  */
3390 #endif
3391 
3392 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3393 #if VMA_STATS_STRING_ENABLED
3394  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3395  {
3396  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3397  }
3398  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3399  {
3400  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3401  }
3402  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3403  {
3404  snprintf(outStr, strLen, "%p", ptr);
3405  }
3406 #endif
3407 
3408 #ifndef VMA_MUTEX
3409  class VmaMutex
3410  {
3411  public:
3412  void Lock() { m_Mutex.lock(); }
3413  void Unlock() { m_Mutex.unlock(); }
3414  private:
3415  std::mutex m_Mutex;
3416  };
3417  #define VMA_MUTEX VmaMutex
3418 #endif
3419 
3420 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3421 #ifndef VMA_RW_MUTEX
3422  #if VMA_USE_STL_SHARED_MUTEX
3423  // Use std::shared_mutex from C++17.
3424  #include <shared_mutex>
3425  class VmaRWMutex
3426  {
3427  public:
3428  void LockRead() { m_Mutex.lock_shared(); }
3429  void UnlockRead() { m_Mutex.unlock_shared(); }
3430  void LockWrite() { m_Mutex.lock(); }
3431  void UnlockWrite() { m_Mutex.unlock(); }
3432  private:
3433  std::shared_mutex m_Mutex;
3434  };
3435  #define VMA_RW_MUTEX VmaRWMutex
3436  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3437  // Use SRWLOCK from WinAPI.
3438  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3439  class VmaRWMutex
3440  {
3441  public:
3442  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3443  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3444  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3445  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3446  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3447  private:
3448  SRWLOCK m_Lock;
3449  };
3450  #define VMA_RW_MUTEX VmaRWMutex
3451  #else
3452  // Less efficient fallback: Use normal mutex.
3453  class VmaRWMutex
3454  {
3455  public:
3456  void LockRead() { m_Mutex.Lock(); }
3457  void UnlockRead() { m_Mutex.Unlock(); }
3458  void LockWrite() { m_Mutex.Lock(); }
3459  void UnlockWrite() { m_Mutex.Unlock(); }
3460  private:
3461  VMA_MUTEX m_Mutex;
3462  };
3463  #define VMA_RW_MUTEX VmaRWMutex
3464  #endif // #if VMA_USE_STL_SHARED_MUTEX
3465 #endif // #ifndef VMA_RW_MUTEX
3466 
3467 /*
3468 If providing your own implementation, you need to implement a subset of std::atomic:
3469 
3470 - Constructor(uint32_t desired)
3471 - uint32_t load() const
3472 - void store(uint32_t desired)
3473 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3474 */
3475 #ifndef VMA_ATOMIC_UINT32
3476  #include <atomic>
3477  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3478 #endif
3479 
3480 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3481 
3485  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3486 #endif
3487 
3488 #ifndef VMA_DEBUG_ALIGNMENT
3489 
3493  #define VMA_DEBUG_ALIGNMENT (1)
3494 #endif
3495 
3496 #ifndef VMA_DEBUG_MARGIN
3497 
3501  #define VMA_DEBUG_MARGIN (0)
3502 #endif
3503 
3504 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3505 
3509  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3510 #endif
3511 
3512 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3513 
3518  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3519 #endif
3520 
3521 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3522 
3526  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3527 #endif
3528 
3529 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3530 
3534  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3535 #endif
3536 
3537 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3538  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3540 #endif
3541 
3542 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3543  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3545 #endif
3546 
3547 #ifndef VMA_CLASS_NO_COPY
3548  #define VMA_CLASS_NO_COPY(className) \
3549  private: \
3550  className(const className&) = delete; \
3551  className& operator=(const className&) = delete;
3552 #endif
3553 
3554 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3555 
3556 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3557 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3558 
3559 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3560 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3561 
3562 /*******************************************************************************
3563 END OF CONFIGURATION
3564 */
3565 
3566 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3567 
3568 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3569  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3570 
3571 // Returns number of bits set to 1 in (v).
3572 static inline uint32_t VmaCountBitsSet(uint32_t v)
3573 {
3574  uint32_t c = v - ((v >> 1) & 0x55555555);
3575  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3576  c = ((c >> 4) + c) & 0x0F0F0F0F;
3577  c = ((c >> 8) + c) & 0x00FF00FF;
3578  c = ((c >> 16) + c) & 0x0000FFFF;
3579  return c;
3580 }
3581 
3582 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
3583 // Use types like uint32_t, uint64_t as T.
3584 template <typename T>
3585 static inline T VmaAlignUp(T val, T align)
3586 {
3587  return (val + align - 1) / align * align;
3588 }
3589 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
3590 // Use types like uint32_t, uint64_t as T.
3591 template <typename T>
3592 static inline T VmaAlignDown(T val, T align)
3593 {
3594  return val / align * align;
3595 }
3596 
3597 // Division with mathematical rounding to nearest number.
3598 template <typename T>
3599 static inline T VmaRoundDiv(T x, T y)
3600 {
3601  return (x + (y / (T)2)) / y;
3602 }
3603 
3604 /*
3605 Returns true if given number is a power of two.
3606 T must be unsigned integer number or signed integer but always nonnegative.
3607 For 0 returns true.
3608 */
3609 template <typename T>
3610 inline bool VmaIsPow2(T x)
3611 {
3612  return (x & (x-1)) == 0;
3613 }
3614 
3615 // Returns smallest power of 2 greater or equal to v.
3616 static inline uint32_t VmaNextPow2(uint32_t v)
3617 {
3618  v--;
3619  v |= v >> 1;
3620  v |= v >> 2;
3621  v |= v >> 4;
3622  v |= v >> 8;
3623  v |= v >> 16;
3624  v++;
3625  return v;
3626 }
3627 static inline uint64_t VmaNextPow2(uint64_t v)
3628 {
3629  v--;
3630  v |= v >> 1;
3631  v |= v >> 2;
3632  v |= v >> 4;
3633  v |= v >> 8;
3634  v |= v >> 16;
3635  v |= v >> 32;
3636  v++;
3637  return v;
3638 }
3639 
3640 // Returns largest power of 2 less or equal to v.
3641 static inline uint32_t VmaPrevPow2(uint32_t v)
3642 {
3643  v |= v >> 1;
3644  v |= v >> 2;
3645  v |= v >> 4;
3646  v |= v >> 8;
3647  v |= v >> 16;
3648  v = v ^ (v >> 1);
3649  return v;
3650 }
3651 static inline uint64_t VmaPrevPow2(uint64_t v)
3652 {
3653  v |= v >> 1;
3654  v |= v >> 2;
3655  v |= v >> 4;
3656  v |= v >> 8;
3657  v |= v >> 16;
3658  v |= v >> 32;
3659  v = v ^ (v >> 1);
3660  return v;
3661 }
3662 
3663 static inline bool VmaStrIsEmpty(const char* pStr)
3664 {
3665  return pStr == VMA_NULL || *pStr == '\0';
3666 }
3667 
3668 #if VMA_STATS_STRING_ENABLED
3669 
3670 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3671 {
3672  switch(algorithm)
3673  {
3675  return "Linear";
3677  return "Buddy";
3678  case 0:
3679  return "Default";
3680  default:
3681  VMA_ASSERT(0);
3682  return "";
3683  }
3684 }
3685 
3686 #endif // #if VMA_STATS_STRING_ENABLED
3687 
3688 #ifndef VMA_SORT
3689 
3690 template<typename Iterator, typename Compare>
3691 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3692 {
3693  Iterator centerValue = end; --centerValue;
3694  Iterator insertIndex = beg;
3695  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3696  {
3697  if(cmp(*memTypeIndex, *centerValue))
3698  {
3699  if(insertIndex != memTypeIndex)
3700  {
3701  VMA_SWAP(*memTypeIndex, *insertIndex);
3702  }
3703  ++insertIndex;
3704  }
3705  }
3706  if(insertIndex != centerValue)
3707  {
3708  VMA_SWAP(*insertIndex, *centerValue);
3709  }
3710  return insertIndex;
3711 }
3712 
3713 template<typename Iterator, typename Compare>
3714 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3715 {
3716  if(beg < end)
3717  {
3718  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3719  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3720  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3721  }
3722 }
3723 
3724 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3725 
3726 #endif // #ifndef VMA_SORT
3727 
3728 /*
3729 Returns true if two memory blocks occupy overlapping pages.
3730 ResourceA must be in less memory offset than ResourceB.
3731 
3732 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3733 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3734 */
3735 static inline bool VmaBlocksOnSamePage(
3736  VkDeviceSize resourceAOffset,
3737  VkDeviceSize resourceASize,
3738  VkDeviceSize resourceBOffset,
3739  VkDeviceSize pageSize)
3740 {
3741  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3742  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3743  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3744  VkDeviceSize resourceBStart = resourceBOffset;
3745  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3746  return resourceAEndPage == resourceBStartPage;
3747 }
3748 
3749 enum VmaSuballocationType
3750 {
3751  VMA_SUBALLOCATION_TYPE_FREE = 0,
3752  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3753  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3754  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3755  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3756  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3757  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3758 };
3759 
3760 /*
3761 Returns true if given suballocation types could conflict and must respect
3762 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3763 or linear image and another one is optimal image. If type is unknown, behave
3764 conservatively.
3765 */
3766 static inline bool VmaIsBufferImageGranularityConflict(
3767  VmaSuballocationType suballocType1,
3768  VmaSuballocationType suballocType2)
3769 {
3770  if(suballocType1 > suballocType2)
3771  {
3772  VMA_SWAP(suballocType1, suballocType2);
3773  }
3774 
3775  switch(suballocType1)
3776  {
3777  case VMA_SUBALLOCATION_TYPE_FREE:
3778  return false;
3779  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3780  return true;
3781  case VMA_SUBALLOCATION_TYPE_BUFFER:
3782  return
3783  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3784  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3785  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3786  return
3787  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3788  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3789  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3790  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3791  return
3792  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3793  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3794  return false;
3795  default:
3796  VMA_ASSERT(0);
3797  return true;
3798  }
3799 }
3800 
3801 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3802 {
3803 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3804  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3805  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3806  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3807  {
3808  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3809  }
3810 #else
3811  // no-op
3812 #endif
3813 }
3814 
3815 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3816 {
3817 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3818  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3819  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3820  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3821  {
3822  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3823  {
3824  return false;
3825  }
3826  }
3827 #endif
3828  return true;
3829 }
3830 
3831 /*
3832 Fills structure with parameters of an example buffer to be used for transfers
3833 during GPU memory defragmentation.
3834 */
3835 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3836 {
3837  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3838  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
3839  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3840  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3841 }
3842 
3843 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3844 struct VmaMutexLock
3845 {
3846  VMA_CLASS_NO_COPY(VmaMutexLock)
3847 public:
3848  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3849  m_pMutex(useMutex ? &mutex : VMA_NULL)
3850  { if(m_pMutex) { m_pMutex->Lock(); } }
3851  ~VmaMutexLock()
3852  { if(m_pMutex) { m_pMutex->Unlock(); } }
3853 private:
3854  VMA_MUTEX* m_pMutex;
3855 };
3856 
3857 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3858 struct VmaMutexLockRead
3859 {
3860  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3861 public:
3862  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3863  m_pMutex(useMutex ? &mutex : VMA_NULL)
3864  { if(m_pMutex) { m_pMutex->LockRead(); } }
3865  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3866 private:
3867  VMA_RW_MUTEX* m_pMutex;
3868 };
3869 
3870 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3871 struct VmaMutexLockWrite
3872 {
3873  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3874 public:
3875  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3876  m_pMutex(useMutex ? &mutex : VMA_NULL)
3877  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3878  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3879 private:
3880  VMA_RW_MUTEX* m_pMutex;
3881 };
3882 
3883 #if VMA_DEBUG_GLOBAL_MUTEX
3884  static VMA_MUTEX gDebugGlobalMutex;
3885  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3886 #else
3887  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3888 #endif
3889 
3890 // Minimum size of a free suballocation to register it in the free suballocation collection.
3891 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3892 
3893 /*
3894 Performs binary search and returns iterator to first element that is greater or
3895 equal to (key), according to comparison (cmp).
3896 
3897 Cmp should return true if first argument is less than second argument.
3898 
3899 Returned value is the found element, if present in the collection or place where
3900 new element with value (key) should be inserted.
3901 */
3902 template <typename CmpLess, typename IterT, typename KeyT>
3903 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
3904 {
3905  size_t down = 0, up = (end - beg);
3906  while(down < up)
3907  {
3908  const size_t mid = (down + up) / 2;
3909  if(cmp(*(beg+mid), key))
3910  {
3911  down = mid + 1;
3912  }
3913  else
3914  {
3915  up = mid;
3916  }
3917  }
3918  return beg + down;
3919 }
3920 
3921 template<typename CmpLess, typename IterT, typename KeyT>
3922 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
3923 {
3924  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3925  beg, end, value, cmp);
3926  if(it == end ||
3927  (!cmp(*it, value) && !cmp(value, *it)))
3928  {
3929  return it;
3930  }
3931  return end;
3932 }
3933 
3934 /*
3935 Returns true if all pointers in the array are not-null and unique.
3936 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3937 T must be pointer type, e.g. VmaAllocation, VmaPool.
3938 */
3939 template<typename T>
3940 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3941 {
3942  for(uint32_t i = 0; i < count; ++i)
3943  {
3944  const T iPtr = arr[i];
3945  if(iPtr == VMA_NULL)
3946  {
3947  return false;
3948  }
3949  for(uint32_t j = i + 1; j < count; ++j)
3950  {
3951  if(iPtr == arr[j])
3952  {
3953  return false;
3954  }
3955  }
3956  }
3957  return true;
3958 }
3959 
3961 // Memory allocation
3962 
3963 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3964 {
3965  if((pAllocationCallbacks != VMA_NULL) &&
3966  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3967  {
3968  return (*pAllocationCallbacks->pfnAllocation)(
3969  pAllocationCallbacks->pUserData,
3970  size,
3971  alignment,
3972  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3973  }
3974  else
3975  {
3976  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3977  }
3978 }
3979 
3980 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3981 {
3982  if((pAllocationCallbacks != VMA_NULL) &&
3983  (pAllocationCallbacks->pfnFree != VMA_NULL))
3984  {
3985  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3986  }
3987  else
3988  {
3989  VMA_SYSTEM_FREE(ptr);
3990  }
3991 }
3992 
3993 template<typename T>
3994 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3995 {
3996  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3997 }
3998 
3999 template<typename T>
4000 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4001 {
4002  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4003 }
4004 
4005 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4006 
4007 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4008 
4009 template<typename T>
4010 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4011 {
4012  ptr->~T();
4013  VmaFree(pAllocationCallbacks, ptr);
4014 }
4015 
4016 template<typename T>
4017 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4018 {
4019  if(ptr != VMA_NULL)
4020  {
4021  for(size_t i = count; i--; )
4022  {
4023  ptr[i].~T();
4024  }
4025  VmaFree(pAllocationCallbacks, ptr);
4026  }
4027 }
4028 
4029 // STL-compatible allocator.
4030 template<typename T>
4031 class VmaStlAllocator
4032 {
4033 public:
4034  const VkAllocationCallbacks* const m_pCallbacks;
4035  typedef T value_type;
4036 
4037  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4038  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4039 
4040  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4041  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4042 
4043  template<typename U>
4044  bool operator==(const VmaStlAllocator<U>& rhs) const
4045  {
4046  return m_pCallbacks == rhs.m_pCallbacks;
4047  }
4048  template<typename U>
4049  bool operator!=(const VmaStlAllocator<U>& rhs) const
4050  {
4051  return m_pCallbacks != rhs.m_pCallbacks;
4052  }
4053 
4054  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4055 };
4056 
4057 #if VMA_USE_STL_VECTOR
4058 
4059 #define VmaVector std::vector
4060 
4061 template<typename T, typename allocatorT>
4062 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4063 {
4064  vec.insert(vec.begin() + index, item);
4065 }
4066 
4067 template<typename T, typename allocatorT>
4068 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4069 {
4070  vec.erase(vec.begin() + index);
4071 }
4072 
4073 #else // #if VMA_USE_STL_VECTOR
4074 
4075 /* Class with interface compatible with subset of std::vector.
4076 T must be POD because constructors and destructors are not called and memcpy is
4077 used for these objects. */
4078 template<typename T, typename AllocatorT>
4079 class VmaVector
4080 {
4081 public:
4082  typedef T value_type;
4083 
4084  VmaVector(const AllocatorT& allocator) :
4085  m_Allocator(allocator),
4086  m_pArray(VMA_NULL),
4087  m_Count(0),
4088  m_Capacity(0)
4089  {
4090  }
4091 
4092  VmaVector(size_t count, const AllocatorT& allocator) :
4093  m_Allocator(allocator),
4094  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4095  m_Count(count),
4096  m_Capacity(count)
4097  {
4098  }
4099 
4100  VmaVector(const VmaVector<T, AllocatorT>& src) :
4101  m_Allocator(src.m_Allocator),
4102  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4103  m_Count(src.m_Count),
4104  m_Capacity(src.m_Count)
4105  {
4106  if(m_Count != 0)
4107  {
4108  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4109  }
4110  }
4111 
4112  ~VmaVector()
4113  {
4114  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4115  }
4116 
4117  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4118  {
4119  if(&rhs != this)
4120  {
4121  resize(rhs.m_Count);
4122  if(m_Count != 0)
4123  {
4124  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4125  }
4126  }
4127  return *this;
4128  }
4129 
4130  bool empty() const { return m_Count == 0; }
4131  size_t size() const { return m_Count; }
4132  T* data() { return m_pArray; }
4133  const T* data() const { return m_pArray; }
4134 
4135  T& operator[](size_t index)
4136  {
4137  VMA_HEAVY_ASSERT(index < m_Count);
4138  return m_pArray[index];
4139  }
4140  const T& operator[](size_t index) const
4141  {
4142  VMA_HEAVY_ASSERT(index < m_Count);
4143  return m_pArray[index];
4144  }
4145 
4146  T& front()
4147  {
4148  VMA_HEAVY_ASSERT(m_Count > 0);
4149  return m_pArray[0];
4150  }
4151  const T& front() const
4152  {
4153  VMA_HEAVY_ASSERT(m_Count > 0);
4154  return m_pArray[0];
4155  }
4156  T& back()
4157  {
4158  VMA_HEAVY_ASSERT(m_Count > 0);
4159  return m_pArray[m_Count - 1];
4160  }
4161  const T& back() const
4162  {
4163  VMA_HEAVY_ASSERT(m_Count > 0);
4164  return m_pArray[m_Count - 1];
4165  }
4166 
4167  void reserve(size_t newCapacity, bool freeMemory = false)
4168  {
4169  newCapacity = VMA_MAX(newCapacity, m_Count);
4170 
4171  if((newCapacity < m_Capacity) && !freeMemory)
4172  {
4173  newCapacity = m_Capacity;
4174  }
4175 
4176  if(newCapacity != m_Capacity)
4177  {
4178  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4179  if(m_Count != 0)
4180  {
4181  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4182  }
4183  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4184  m_Capacity = newCapacity;
4185  m_pArray = newArray;
4186  }
4187  }
4188 
4189  void resize(size_t newCount, bool freeMemory = false)
4190  {
4191  size_t newCapacity = m_Capacity;
4192  if(newCount > m_Capacity)
4193  {
4194  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4195  }
4196  else if(freeMemory)
4197  {
4198  newCapacity = newCount;
4199  }
4200 
4201  if(newCapacity != m_Capacity)
4202  {
4203  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4204  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4205  if(elementsToCopy != 0)
4206  {
4207  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4208  }
4209  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4210  m_Capacity = newCapacity;
4211  m_pArray = newArray;
4212  }
4213 
4214  m_Count = newCount;
4215  }
4216 
4217  void clear(bool freeMemory = false)
4218  {
4219  resize(0, freeMemory);
4220  }
4221 
4222  void insert(size_t index, const T& src)
4223  {
4224  VMA_HEAVY_ASSERT(index <= m_Count);
4225  const size_t oldCount = size();
4226  resize(oldCount + 1);
4227  if(index < oldCount)
4228  {
4229  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4230  }
4231  m_pArray[index] = src;
4232  }
4233 
4234  void remove(size_t index)
4235  {
4236  VMA_HEAVY_ASSERT(index < m_Count);
4237  const size_t oldCount = size();
4238  if(index < oldCount - 1)
4239  {
4240  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4241  }
4242  resize(oldCount - 1);
4243  }
4244 
4245  void push_back(const T& src)
4246  {
4247  const size_t newIndex = size();
4248  resize(newIndex + 1);
4249  m_pArray[newIndex] = src;
4250  }
4251 
4252  void pop_back()
4253  {
4254  VMA_HEAVY_ASSERT(m_Count > 0);
4255  resize(size() - 1);
4256  }
4257 
4258  void push_front(const T& src)
4259  {
4260  insert(0, src);
4261  }
4262 
4263  void pop_front()
4264  {
4265  VMA_HEAVY_ASSERT(m_Count > 0);
4266  remove(0);
4267  }
4268 
4269  typedef T* iterator;
4270 
4271  iterator begin() { return m_pArray; }
4272  iterator end() { return m_pArray + m_Count; }
4273 
4274 private:
4275  AllocatorT m_Allocator;
4276  T* m_pArray;
4277  size_t m_Count;
4278  size_t m_Capacity;
4279 };
4280 
4281 template<typename T, typename allocatorT>
4282 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4283 {
4284  vec.insert(index, item);
4285 }
4286 
4287 template<typename T, typename allocatorT>
4288 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4289 {
4290  vec.remove(index);
4291 }
4292 
4293 #endif // #if VMA_USE_STL_VECTOR
4294 
4295 template<typename CmpLess, typename VectorT>
4296 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4297 {
4298  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4299  vector.data(),
4300  vector.data() + vector.size(),
4301  value,
4302  CmpLess()) - vector.data();
4303  VmaVectorInsert(vector, indexToInsert, value);
4304  return indexToInsert;
4305 }
4306 
4307 template<typename CmpLess, typename VectorT>
4308 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4309 {
4310  CmpLess comparator;
4311  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4312  vector.begin(),
4313  vector.end(),
4314  value,
4315  comparator);
4316  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4317  {
4318  size_t indexToRemove = it - vector.begin();
4319  VmaVectorRemove(vector, indexToRemove);
4320  return true;
4321  }
4322  return false;
4323 }
4324 
4326 // class VmaPoolAllocator
4327 
4328 /*
4329 Allocator for objects of type T using a list of arrays (pools) to speed up
4330 allocation. Number of elements that can be allocated is not bounded because
4331 allocator can create multiple blocks.
4332 */
4333 template<typename T>
4334 class VmaPoolAllocator
4335 {
4336  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4337 public:
4338  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4339  ~VmaPoolAllocator();
4340  void Clear();
4341  T* Alloc();
4342  void Free(T* ptr);
4343 
4344 private:
4345  union Item
4346  {
4347  uint32_t NextFreeIndex;
4348  T Value;
4349  };
4350 
4351  struct ItemBlock
4352  {
4353  Item* pItems;
4354  uint32_t Capacity;
4355  uint32_t FirstFreeIndex;
4356  };
4357 
4358  const VkAllocationCallbacks* m_pAllocationCallbacks;
4359  const uint32_t m_FirstBlockCapacity;
4360  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4361 
4362  ItemBlock& CreateNewBlock();
4363 };
4364 
4365 template<typename T>
4366 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4367  m_pAllocationCallbacks(pAllocationCallbacks),
4368  m_FirstBlockCapacity(firstBlockCapacity),
4369  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4370 {
4371  VMA_ASSERT(m_FirstBlockCapacity > 1);
4372 }
4373 
4374 template<typename T>
4375 VmaPoolAllocator<T>::~VmaPoolAllocator()
4376 {
4377  Clear();
4378 }
4379 
4380 template<typename T>
4381 void VmaPoolAllocator<T>::Clear()
4382 {
4383  for(size_t i = m_ItemBlocks.size(); i--; )
4384  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4385  m_ItemBlocks.clear();
4386 }
4387 
4388 template<typename T>
4389 T* VmaPoolAllocator<T>::Alloc()
4390 {
4391  for(size_t i = m_ItemBlocks.size(); i--; )
4392  {
4393  ItemBlock& block = m_ItemBlocks[i];
4394  // This block has some free items: Use first one.
4395  if(block.FirstFreeIndex != UINT32_MAX)
4396  {
4397  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4398  block.FirstFreeIndex = pItem->NextFreeIndex;
4399  return &pItem->Value;
4400  }
4401  }
4402 
4403  // No block has free item: Create new one and use it.
4404  ItemBlock& newBlock = CreateNewBlock();
4405  Item* const pItem = &newBlock.pItems[0];
4406  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4407  return &pItem->Value;
4408 }
4409 
4410 template<typename T>
4411 void VmaPoolAllocator<T>::Free(T* ptr)
4412 {
4413  // Search all memory blocks to find ptr.
4414  for(size_t i = m_ItemBlocks.size(); i--; )
4415  {
4416  ItemBlock& block = m_ItemBlocks[i];
4417 
4418  // Casting to union.
4419  Item* pItemPtr;
4420  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4421 
4422  // Check if pItemPtr is in address range of this block.
4423  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4424  {
4425  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4426  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4427  block.FirstFreeIndex = index;
4428  return;
4429  }
4430  }
4431  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4432 }
4433 
4434 template<typename T>
4435 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4436 {
4437  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4438  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4439 
4440  const ItemBlock newBlock = {
4441  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4442  newBlockCapacity,
4443  0 };
4444 
4445  m_ItemBlocks.push_back(newBlock);
4446 
4447  // Setup singly-linked list of all free items in this block.
4448  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4449  newBlock.pItems[i].NextFreeIndex = i + 1;
4450  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4451  return m_ItemBlocks.back();
4452 }
4453 
4455 // class VmaRawList, VmaList
4456 
4457 #if VMA_USE_STL_LIST
4458 
4459 #define VmaList std::list
4460 
4461 #else // #if VMA_USE_STL_LIST
4462 
4463 template<typename T>
4464 struct VmaListItem
4465 {
4466  VmaListItem* pPrev;
4467  VmaListItem* pNext;
4468  T Value;
4469 };
4470 
4471 // Doubly linked list.
4472 template<typename T>
4473 class VmaRawList
4474 {
4475  VMA_CLASS_NO_COPY(VmaRawList)
4476 public:
4477  typedef VmaListItem<T> ItemType;
4478 
4479  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4480  ~VmaRawList();
4481  void Clear();
4482 
4483  size_t GetCount() const { return m_Count; }
4484  bool IsEmpty() const { return m_Count == 0; }
4485 
4486  ItemType* Front() { return m_pFront; }
4487  const ItemType* Front() const { return m_pFront; }
4488  ItemType* Back() { return m_pBack; }
4489  const ItemType* Back() const { return m_pBack; }
4490 
4491  ItemType* PushBack();
4492  ItemType* PushFront();
4493  ItemType* PushBack(const T& value);
4494  ItemType* PushFront(const T& value);
4495  void PopBack();
4496  void PopFront();
4497 
4498  // Item can be null - it means PushBack.
4499  ItemType* InsertBefore(ItemType* pItem);
4500  // Item can be null - it means PushFront.
4501  ItemType* InsertAfter(ItemType* pItem);
4502 
4503  ItemType* InsertBefore(ItemType* pItem, const T& value);
4504  ItemType* InsertAfter(ItemType* pItem, const T& value);
4505 
4506  void Remove(ItemType* pItem);
4507 
4508 private:
4509  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4510  VmaPoolAllocator<ItemType> m_ItemAllocator;
4511  ItemType* m_pFront;
4512  ItemType* m_pBack;
4513  size_t m_Count;
4514 };
4515 
4516 template<typename T>
4517 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4518  m_pAllocationCallbacks(pAllocationCallbacks),
4519  m_ItemAllocator(pAllocationCallbacks, 128),
4520  m_pFront(VMA_NULL),
4521  m_pBack(VMA_NULL),
4522  m_Count(0)
4523 {
4524 }
4525 
4526 template<typename T>
4527 VmaRawList<T>::~VmaRawList()
4528 {
4529  // Intentionally not calling Clear, because that would be unnecessary
4530  // computations to return all items to m_ItemAllocator as free.
4531 }
4532 
4533 template<typename T>
4534 void VmaRawList<T>::Clear()
4535 {
4536  if(IsEmpty() == false)
4537  {
4538  ItemType* pItem = m_pBack;
4539  while(pItem != VMA_NULL)
4540  {
4541  ItemType* const pPrevItem = pItem->pPrev;
4542  m_ItemAllocator.Free(pItem);
4543  pItem = pPrevItem;
4544  }
4545  m_pFront = VMA_NULL;
4546  m_pBack = VMA_NULL;
4547  m_Count = 0;
4548  }
4549 }
4550 
4551 template<typename T>
4552 VmaListItem<T>* VmaRawList<T>::PushBack()
4553 {
4554  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4555  pNewItem->pNext = VMA_NULL;
4556  if(IsEmpty())
4557  {
4558  pNewItem->pPrev = VMA_NULL;
4559  m_pFront = pNewItem;
4560  m_pBack = pNewItem;
4561  m_Count = 1;
4562  }
4563  else
4564  {
4565  pNewItem->pPrev = m_pBack;
4566  m_pBack->pNext = pNewItem;
4567  m_pBack = pNewItem;
4568  ++m_Count;
4569  }
4570  return pNewItem;
4571 }
4572 
4573 template<typename T>
4574 VmaListItem<T>* VmaRawList<T>::PushFront()
4575 {
4576  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4577  pNewItem->pPrev = VMA_NULL;
4578  if(IsEmpty())
4579  {
4580  pNewItem->pNext = VMA_NULL;
4581  m_pFront = pNewItem;
4582  m_pBack = pNewItem;
4583  m_Count = 1;
4584  }
4585  else
4586  {
4587  pNewItem->pNext = m_pFront;
4588  m_pFront->pPrev = pNewItem;
4589  m_pFront = pNewItem;
4590  ++m_Count;
4591  }
4592  return pNewItem;
4593 }
4594 
4595 template<typename T>
4596 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4597 {
4598  ItemType* const pNewItem = PushBack();
4599  pNewItem->Value = value;
4600  return pNewItem;
4601 }
4602 
4603 template<typename T>
4604 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4605 {
4606  ItemType* const pNewItem = PushFront();
4607  pNewItem->Value = value;
4608  return pNewItem;
4609 }
4610 
4611 template<typename T>
4612 void VmaRawList<T>::PopBack()
4613 {
4614  VMA_HEAVY_ASSERT(m_Count > 0);
4615  ItemType* const pBackItem = m_pBack;
4616  ItemType* const pPrevItem = pBackItem->pPrev;
4617  if(pPrevItem != VMA_NULL)
4618  {
4619  pPrevItem->pNext = VMA_NULL;
4620  }
4621  m_pBack = pPrevItem;
4622  m_ItemAllocator.Free(pBackItem);
4623  --m_Count;
4624 }
4625 
4626 template<typename T>
4627 void VmaRawList<T>::PopFront()
4628 {
4629  VMA_HEAVY_ASSERT(m_Count > 0);
4630  ItemType* const pFrontItem = m_pFront;
4631  ItemType* const pNextItem = pFrontItem->pNext;
4632  if(pNextItem != VMA_NULL)
4633  {
4634  pNextItem->pPrev = VMA_NULL;
4635  }
4636  m_pFront = pNextItem;
4637  m_ItemAllocator.Free(pFrontItem);
4638  --m_Count;
4639 }
4640 
4641 template<typename T>
4642 void VmaRawList<T>::Remove(ItemType* pItem)
4643 {
4644  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4645  VMA_HEAVY_ASSERT(m_Count > 0);
4646 
4647  if(pItem->pPrev != VMA_NULL)
4648  {
4649  pItem->pPrev->pNext = pItem->pNext;
4650  }
4651  else
4652  {
4653  VMA_HEAVY_ASSERT(m_pFront == pItem);
4654  m_pFront = pItem->pNext;
4655  }
4656 
4657  if(pItem->pNext != VMA_NULL)
4658  {
4659  pItem->pNext->pPrev = pItem->pPrev;
4660  }
4661  else
4662  {
4663  VMA_HEAVY_ASSERT(m_pBack == pItem);
4664  m_pBack = pItem->pPrev;
4665  }
4666 
4667  m_ItemAllocator.Free(pItem);
4668  --m_Count;
4669 }
4670 
4671 template<typename T>
4672 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4673 {
4674  if(pItem != VMA_NULL)
4675  {
4676  ItemType* const prevItem = pItem->pPrev;
4677  ItemType* const newItem = m_ItemAllocator.Alloc();
4678  newItem->pPrev = prevItem;
4679  newItem->pNext = pItem;
4680  pItem->pPrev = newItem;
4681  if(prevItem != VMA_NULL)
4682  {
4683  prevItem->pNext = newItem;
4684  }
4685  else
4686  {
4687  VMA_HEAVY_ASSERT(m_pFront == pItem);
4688  m_pFront = newItem;
4689  }
4690  ++m_Count;
4691  return newItem;
4692  }
4693  else
4694  return PushBack();
4695 }
4696 
4697 template<typename T>
4698 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4699 {
4700  if(pItem != VMA_NULL)
4701  {
4702  ItemType* const nextItem = pItem->pNext;
4703  ItemType* const newItem = m_ItemAllocator.Alloc();
4704  newItem->pNext = nextItem;
4705  newItem->pPrev = pItem;
4706  pItem->pNext = newItem;
4707  if(nextItem != VMA_NULL)
4708  {
4709  nextItem->pPrev = newItem;
4710  }
4711  else
4712  {
4713  VMA_HEAVY_ASSERT(m_pBack == pItem);
4714  m_pBack = newItem;
4715  }
4716  ++m_Count;
4717  return newItem;
4718  }
4719  else
4720  return PushFront();
4721 }
4722 
4723 template<typename T>
4724 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4725 {
4726  ItemType* const newItem = InsertBefore(pItem);
4727  newItem->Value = value;
4728  return newItem;
4729 }
4730 
4731 template<typename T>
4732 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4733 {
4734  ItemType* const newItem = InsertAfter(pItem);
4735  newItem->Value = value;
4736  return newItem;
4737 }
4738 
4739 template<typename T, typename AllocatorT>
4740 class VmaList
4741 {
4742  VMA_CLASS_NO_COPY(VmaList)
4743 public:
4744  class iterator
4745  {
4746  public:
4747  iterator() :
4748  m_pList(VMA_NULL),
4749  m_pItem(VMA_NULL)
4750  {
4751  }
4752 
4753  T& operator*() const
4754  {
4755  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4756  return m_pItem->Value;
4757  }
4758  T* operator->() const
4759  {
4760  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4761  return &m_pItem->Value;
4762  }
4763 
4764  iterator& operator++()
4765  {
4766  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4767  m_pItem = m_pItem->pNext;
4768  return *this;
4769  }
4770  iterator& operator--()
4771  {
4772  if(m_pItem != VMA_NULL)
4773  {
4774  m_pItem = m_pItem->pPrev;
4775  }
4776  else
4777  {
4778  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4779  m_pItem = m_pList->Back();
4780  }
4781  return *this;
4782  }
4783 
4784  iterator operator++(int)
4785  {
4786  iterator result = *this;
4787  ++*this;
4788  return result;
4789  }
4790  iterator operator--(int)
4791  {
4792  iterator result = *this;
4793  --*this;
4794  return result;
4795  }
4796 
4797  bool operator==(const iterator& rhs) const
4798  {
4799  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4800  return m_pItem == rhs.m_pItem;
4801  }
4802  bool operator!=(const iterator& rhs) const
4803  {
4804  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4805  return m_pItem != rhs.m_pItem;
4806  }
4807 
4808  private:
4809  VmaRawList<T>* m_pList;
4810  VmaListItem<T>* m_pItem;
4811 
4812  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4813  m_pList(pList),
4814  m_pItem(pItem)
4815  {
4816  }
4817 
4818  friend class VmaList<T, AllocatorT>;
4819  };
4820 
4821  class const_iterator
4822  {
4823  public:
4824  const_iterator() :
4825  m_pList(VMA_NULL),
4826  m_pItem(VMA_NULL)
4827  {
4828  }
4829 
4830  const_iterator(const iterator& src) :
4831  m_pList(src.m_pList),
4832  m_pItem(src.m_pItem)
4833  {
4834  }
4835 
4836  const T& operator*() const
4837  {
4838  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4839  return m_pItem->Value;
4840  }
4841  const T* operator->() const
4842  {
4843  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4844  return &m_pItem->Value;
4845  }
4846 
4847  const_iterator& operator++()
4848  {
4849  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4850  m_pItem = m_pItem->pNext;
4851  return *this;
4852  }
4853  const_iterator& operator--()
4854  {
4855  if(m_pItem != VMA_NULL)
4856  {
4857  m_pItem = m_pItem->pPrev;
4858  }
4859  else
4860  {
4861  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4862  m_pItem = m_pList->Back();
4863  }
4864  return *this;
4865  }
4866 
4867  const_iterator operator++(int)
4868  {
4869  const_iterator result = *this;
4870  ++*this;
4871  return result;
4872  }
4873  const_iterator operator--(int)
4874  {
4875  const_iterator result = *this;
4876  --*this;
4877  return result;
4878  }
4879 
4880  bool operator==(const const_iterator& rhs) const
4881  {
4882  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4883  return m_pItem == rhs.m_pItem;
4884  }
4885  bool operator!=(const const_iterator& rhs) const
4886  {
4887  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4888  return m_pItem != rhs.m_pItem;
4889  }
4890 
4891  private:
4892  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4893  m_pList(pList),
4894  m_pItem(pItem)
4895  {
4896  }
4897 
4898  const VmaRawList<T>* m_pList;
4899  const VmaListItem<T>* m_pItem;
4900 
4901  friend class VmaList<T, AllocatorT>;
4902  };
4903 
4904  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4905 
4906  bool empty() const { return m_RawList.IsEmpty(); }
4907  size_t size() const { return m_RawList.GetCount(); }
4908 
4909  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4910  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4911 
4912  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4913  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4914 
4915  void clear() { m_RawList.Clear(); }
4916  void push_back(const T& value) { m_RawList.PushBack(value); }
4917  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4918  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4919 
4920 private:
4921  VmaRawList<T> m_RawList;
4922 };
4923 
4924 #endif // #if VMA_USE_STL_LIST
4925 
4927 // class VmaMap
4928 
4929 // Unused in this version.
4930 #if 0
4931 
4932 #if VMA_USE_STL_UNORDERED_MAP
4933 
4934 #define VmaPair std::pair
4935 
4936 #define VMA_MAP_TYPE(KeyT, ValueT) \
4937  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4938 
4939 #else // #if VMA_USE_STL_UNORDERED_MAP
4940 
4941 template<typename T1, typename T2>
4942 struct VmaPair
4943 {
4944  T1 first;
4945  T2 second;
4946 
4947  VmaPair() : first(), second() { }
4948  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4949 };
4950 
4951 /* Class compatible with subset of interface of std::unordered_map.
4952 KeyT, ValueT must be POD because they will be stored in VmaVector.
4953 */
4954 template<typename KeyT, typename ValueT>
4955 class VmaMap
4956 {
4957 public:
4958  typedef VmaPair<KeyT, ValueT> PairType;
4959  typedef PairType* iterator;
4960 
4961  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4962 
4963  iterator begin() { return m_Vector.begin(); }
4964  iterator end() { return m_Vector.end(); }
4965 
4966  void insert(const PairType& pair);
4967  iterator find(const KeyT& key);
4968  void erase(iterator it);
4969 
4970 private:
4971  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4972 };
4973 
4974 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4975 
4976 template<typename FirstT, typename SecondT>
4977 struct VmaPairFirstLess
4978 {
4979  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4980  {
4981  return lhs.first < rhs.first;
4982  }
4983  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4984  {
4985  return lhs.first < rhsFirst;
4986  }
4987 };
4988 
4989 template<typename KeyT, typename ValueT>
4990 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4991 {
4992  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4993  m_Vector.data(),
4994  m_Vector.data() + m_Vector.size(),
4995  pair,
4996  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4997  VmaVectorInsert(m_Vector, indexToInsert, pair);
4998 }
4999 
5000 template<typename KeyT, typename ValueT>
5001 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5002 {
5003  PairType* it = VmaBinaryFindFirstNotLess(
5004  m_Vector.data(),
5005  m_Vector.data() + m_Vector.size(),
5006  key,
5007  VmaPairFirstLess<KeyT, ValueT>());
5008  if((it != m_Vector.end()) && (it->first == key))
5009  {
5010  return it;
5011  }
5012  else
5013  {
5014  return m_Vector.end();
5015  }
5016 }
5017 
5018 template<typename KeyT, typename ValueT>
5019 void VmaMap<KeyT, ValueT>::erase(iterator it)
5020 {
5021  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5022 }
5023 
5024 #endif // #if VMA_USE_STL_UNORDERED_MAP
5025 
5026 #endif // #if 0
5027 
5029 
5030 class VmaDeviceMemoryBlock;
5031 
5032 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5033 
5034 struct VmaAllocation_T
5035 {
5036 private:
5037  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5038 
5039  enum FLAGS
5040  {
5041  FLAG_USER_DATA_STRING = 0x01,
5042  };
5043 
5044 public:
5045  enum ALLOCATION_TYPE
5046  {
5047  ALLOCATION_TYPE_NONE,
5048  ALLOCATION_TYPE_BLOCK,
5049  ALLOCATION_TYPE_DEDICATED,
5050  };
5051 
5052  /*
5053  This struct cannot have constructor or destructor. It must be POD because it is
5054  allocated using VmaPoolAllocator.
5055  */
5056 
5057  void Ctor(uint32_t currentFrameIndex, bool userDataString)
5058  {
5059  m_Alignment = 1;
5060  m_Size = 0;
5061  m_pUserData = VMA_NULL;
5062  m_LastUseFrameIndex = currentFrameIndex;
5063  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
5064  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
5065  m_MapCount = 0;
5066  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
5067 
5068 #if VMA_STATS_STRING_ENABLED
5069  m_CreationFrameIndex = currentFrameIndex;
5070  m_BufferImageUsage = 0;
5071 #endif
5072  }
5073 
5074  void Dtor()
5075  {
5076  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5077 
5078  // Check if owned string was freed.
5079  VMA_ASSERT(m_pUserData == VMA_NULL);
5080  }
5081 
5082  void InitBlockAllocation(
5083  VmaDeviceMemoryBlock* block,
5084  VkDeviceSize offset,
5085  VkDeviceSize alignment,
5086  VkDeviceSize size,
5087  VmaSuballocationType suballocationType,
5088  bool mapped,
5089  bool canBecomeLost)
5090  {
5091  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5092  VMA_ASSERT(block != VMA_NULL);
5093  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5094  m_Alignment = alignment;
5095  m_Size = size;
5096  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5097  m_SuballocationType = (uint8_t)suballocationType;
5098  m_BlockAllocation.m_Block = block;
5099  m_BlockAllocation.m_Offset = offset;
5100  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5101  }
5102 
5103  void InitLost()
5104  {
5105  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5106  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5107  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5108  m_BlockAllocation.m_Block = VMA_NULL;
5109  m_BlockAllocation.m_Offset = 0;
5110  m_BlockAllocation.m_CanBecomeLost = true;
5111  }
5112 
5113  void ChangeBlockAllocation(
5114  VmaAllocator hAllocator,
5115  VmaDeviceMemoryBlock* block,
5116  VkDeviceSize offset);
5117 
5118  void ChangeOffset(VkDeviceSize newOffset);
5119 
5120  // pMappedData not null means allocation is created with MAPPED flag.
5121  void InitDedicatedAllocation(
5122  uint32_t memoryTypeIndex,
5123  VkDeviceMemory hMemory,
5124  VmaSuballocationType suballocationType,
5125  void* pMappedData,
5126  VkDeviceSize size)
5127  {
5128  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5129  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5130  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5131  m_Alignment = 0;
5132  m_Size = size;
5133  m_SuballocationType = (uint8_t)suballocationType;
5134  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5135  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5136  m_DedicatedAllocation.m_hMemory = hMemory;
5137  m_DedicatedAllocation.m_pMappedData = pMappedData;
5138  }
5139 
5140  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5141  VkDeviceSize GetAlignment() const { return m_Alignment; }
5142  VkDeviceSize GetSize() const { return m_Size; }
5143  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5144  void* GetUserData() const { return m_pUserData; }
5145  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5146  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5147 
5148  VmaDeviceMemoryBlock* GetBlock() const
5149  {
5150  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5151  return m_BlockAllocation.m_Block;
5152  }
5153  VkDeviceSize GetOffset() const;
5154  VkDeviceMemory GetMemory() const;
5155  uint32_t GetMemoryTypeIndex() const;
5156  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5157  void* GetMappedData() const;
5158  bool CanBecomeLost() const;
5159 
5160  uint32_t GetLastUseFrameIndex() const
5161  {
5162  return m_LastUseFrameIndex.load();
5163  }
5164  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5165  {
5166  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5167  }
5168  /*
5169  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5170  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5171  - Else, returns false.
5172 
5173  If hAllocation is already lost, assert - you should not call it then.
5174  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5175  */
5176  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5177 
5178  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5179  {
5180  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5181  outInfo.blockCount = 1;
5182  outInfo.allocationCount = 1;
5183  outInfo.unusedRangeCount = 0;
5184  outInfo.usedBytes = m_Size;
5185  outInfo.unusedBytes = 0;
5186  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5187  outInfo.unusedRangeSizeMin = UINT64_MAX;
5188  outInfo.unusedRangeSizeMax = 0;
5189  }
5190 
5191  void BlockAllocMap();
5192  void BlockAllocUnmap();
5193  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5194  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5195 
5196 #if VMA_STATS_STRING_ENABLED
5197  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5198  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5199 
5200  void InitBufferImageUsage(uint32_t bufferImageUsage)
5201  {
5202  VMA_ASSERT(m_BufferImageUsage == 0);
5203  m_BufferImageUsage = bufferImageUsage;
5204  }
5205 
5206  void PrintParameters(class VmaJsonWriter& json) const;
5207 #endif
5208 
5209 private:
5210  VkDeviceSize m_Alignment;
5211  VkDeviceSize m_Size;
5212  void* m_pUserData;
5213  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5214  uint8_t m_Type; // ALLOCATION_TYPE
5215  uint8_t m_SuballocationType; // VmaSuballocationType
5216  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5217  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5218  uint8_t m_MapCount;
5219  uint8_t m_Flags; // enum FLAGS
5220 
5221  // Allocation out of VmaDeviceMemoryBlock.
5222  struct BlockAllocation
5223  {
5224  VmaDeviceMemoryBlock* m_Block;
5225  VkDeviceSize m_Offset;
5226  bool m_CanBecomeLost;
5227  };
5228 
5229  // Allocation for an object that has its own private VkDeviceMemory.
5230  struct DedicatedAllocation
5231  {
5232  uint32_t m_MemoryTypeIndex;
5233  VkDeviceMemory m_hMemory;
5234  void* m_pMappedData; // Not null means memory is mapped.
5235  };
5236 
5237  union
5238  {
5239  // Allocation out of VmaDeviceMemoryBlock.
5240  BlockAllocation m_BlockAllocation;
5241  // Allocation for an object that has its own private VkDeviceMemory.
5242  DedicatedAllocation m_DedicatedAllocation;
5243  };
5244 
5245 #if VMA_STATS_STRING_ENABLED
5246  uint32_t m_CreationFrameIndex;
5247  uint32_t m_BufferImageUsage; // 0 if unknown.
5248 #endif
5249 
5250  void FreeUserDataString(VmaAllocator hAllocator);
5251 };
5252 
5253 /*
5254 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5255 allocated memory block or free.
5256 */
5257 struct VmaSuballocation
5258 {
5259  VkDeviceSize offset;
5260  VkDeviceSize size;
5261  VmaAllocation hAllocation;
5262  VmaSuballocationType type;
5263 };
5264 
5265 // Comparator for offsets.
5266 struct VmaSuballocationOffsetLess
5267 {
5268  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5269  {
5270  return lhs.offset < rhs.offset;
5271  }
5272 };
5273 struct VmaSuballocationOffsetGreater
5274 {
5275  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5276  {
5277  return lhs.offset > rhs.offset;
5278  }
5279 };
5280 
5281 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5282 
5283 // Cost of one additional allocation lost, as equivalent in bytes.
5284 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5285 
5286 enum class VmaAllocationRequestType
5287 {
5288  Normal,
5289  // Used by "Linear" algorithm.
5290  UpperAddress,
5291  EndOf1st,
5292  EndOf2nd,
5293 };
5294 
5295 /*
5296 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5297 
5298 If canMakeOtherLost was false:
5299 - item points to a FREE suballocation.
5300 - itemsToMakeLostCount is 0.
5301 
5302 If canMakeOtherLost was true:
5303 - item points to first of sequence of suballocations, which are either FREE,
5304  or point to VmaAllocations that can become lost.
5305 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5306  the requested allocation to succeed.
5307 */
5308 struct VmaAllocationRequest
5309 {
5310  VkDeviceSize offset;
5311  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5312  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5313  VmaSuballocationList::iterator item;
5314  size_t itemsToMakeLostCount;
5315  void* customData;
5316  VmaAllocationRequestType type;
5317 
5318  VkDeviceSize CalcCost() const
5319  {
5320  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5321  }
5322 };
5323 
5324 /*
5325 Data structure used for bookkeeping of allocations and unused ranges of memory
5326 in a single VkDeviceMemory block.
5327 */
5328 class VmaBlockMetadata
5329 {
5330 public:
5331  VmaBlockMetadata(VmaAllocator hAllocator);
5332  virtual ~VmaBlockMetadata() { }
5333  virtual void Init(VkDeviceSize size) { m_Size = size; }
5334 
5335  // Validates all data structures inside this object. If not valid, returns false.
5336  virtual bool Validate() const = 0;
5337  VkDeviceSize GetSize() const { return m_Size; }
5338  virtual size_t GetAllocationCount() const = 0;
5339  virtual VkDeviceSize GetSumFreeSize() const = 0;
5340  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5341  // Returns true if this block is empty - contains only single free suballocation.
5342  virtual bool IsEmpty() const = 0;
5343 
5344  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5345  // Shouldn't modify blockCount.
5346  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5347 
5348 #if VMA_STATS_STRING_ENABLED
5349  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5350 #endif
5351 
5352  // Tries to find a place for suballocation with given parameters inside this block.
5353  // If succeeded, fills pAllocationRequest and returns true.
5354  // If failed, returns false.
5355  virtual bool CreateAllocationRequest(
5356  uint32_t currentFrameIndex,
5357  uint32_t frameInUseCount,
5358  VkDeviceSize bufferImageGranularity,
5359  VkDeviceSize allocSize,
5360  VkDeviceSize allocAlignment,
5361  bool upperAddress,
5362  VmaSuballocationType allocType,
5363  bool canMakeOtherLost,
5364  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5365  uint32_t strategy,
5366  VmaAllocationRequest* pAllocationRequest) = 0;
5367 
5368  virtual bool MakeRequestedAllocationsLost(
5369  uint32_t currentFrameIndex,
5370  uint32_t frameInUseCount,
5371  VmaAllocationRequest* pAllocationRequest) = 0;
5372 
5373  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5374 
5375  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5376 
5377  // Makes actual allocation based on request. Request must already be checked and valid.
5378  virtual void Alloc(
5379  const VmaAllocationRequest& request,
5380  VmaSuballocationType type,
5381  VkDeviceSize allocSize,
5382  VmaAllocation hAllocation) = 0;
5383 
5384  // Frees suballocation assigned to given memory region.
5385  virtual void Free(const VmaAllocation allocation) = 0;
5386  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5387 
5388 protected:
5389  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5390 
5391 #if VMA_STATS_STRING_ENABLED
5392  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5393  VkDeviceSize unusedBytes,
5394  size_t allocationCount,
5395  size_t unusedRangeCount) const;
5396  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5397  VkDeviceSize offset,
5398  VmaAllocation hAllocation) const;
5399  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5400  VkDeviceSize offset,
5401  VkDeviceSize size) const;
5402  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5403 #endif
5404 
5405 private:
5406  VkDeviceSize m_Size;
5407  const VkAllocationCallbacks* m_pAllocationCallbacks;
5408 };
5409 
5410 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5411  VMA_ASSERT(0 && "Validation failed: " #cond); \
5412  return false; \
5413  } } while(false)
5414 
5415 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5416 {
5417  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5418 public:
5419  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5420  virtual ~VmaBlockMetadata_Generic();
5421  virtual void Init(VkDeviceSize size);
5422 
5423  virtual bool Validate() const;
5424  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5425  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5426  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5427  virtual bool IsEmpty() const;
5428 
5429  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5430  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5431 
5432 #if VMA_STATS_STRING_ENABLED
5433  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5434 #endif
5435 
5436  virtual bool CreateAllocationRequest(
5437  uint32_t currentFrameIndex,
5438  uint32_t frameInUseCount,
5439  VkDeviceSize bufferImageGranularity,
5440  VkDeviceSize allocSize,
5441  VkDeviceSize allocAlignment,
5442  bool upperAddress,
5443  VmaSuballocationType allocType,
5444  bool canMakeOtherLost,
5445  uint32_t strategy,
5446  VmaAllocationRequest* pAllocationRequest);
5447 
5448  virtual bool MakeRequestedAllocationsLost(
5449  uint32_t currentFrameIndex,
5450  uint32_t frameInUseCount,
5451  VmaAllocationRequest* pAllocationRequest);
5452 
5453  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5454 
5455  virtual VkResult CheckCorruption(const void* pBlockData);
5456 
5457  virtual void Alloc(
5458  const VmaAllocationRequest& request,
5459  VmaSuballocationType type,
5460  VkDeviceSize allocSize,
5461  VmaAllocation hAllocation);
5462 
5463  virtual void Free(const VmaAllocation allocation);
5464  virtual void FreeAtOffset(VkDeviceSize offset);
5465 
5467  // For defragmentation
5468 
5469  bool IsBufferImageGranularityConflictPossible(
5470  VkDeviceSize bufferImageGranularity,
5471  VmaSuballocationType& inOutPrevSuballocType) const;
5472 
5473 private:
5474  friend class VmaDefragmentationAlgorithm_Generic;
5475  friend class VmaDefragmentationAlgorithm_Fast;
5476 
5477  uint32_t m_FreeCount;
5478  VkDeviceSize m_SumFreeSize;
5479  VmaSuballocationList m_Suballocations;
5480  // Suballocations that are free and have size greater than certain threshold.
5481  // Sorted by size, ascending.
5482  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5483 
5484  bool ValidateFreeSuballocationList() const;
5485 
5486  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5487  // If yes, fills pOffset and returns true. If no, returns false.
5488  bool CheckAllocation(
5489  uint32_t currentFrameIndex,
5490  uint32_t frameInUseCount,
5491  VkDeviceSize bufferImageGranularity,
5492  VkDeviceSize allocSize,
5493  VkDeviceSize allocAlignment,
5494  VmaSuballocationType allocType,
5495  VmaSuballocationList::const_iterator suballocItem,
5496  bool canMakeOtherLost,
5497  VkDeviceSize* pOffset,
5498  size_t* itemsToMakeLostCount,
5499  VkDeviceSize* pSumFreeSize,
5500  VkDeviceSize* pSumItemSize) const;
5501  // Given free suballocation, it merges it with following one, which must also be free.
5502  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5503  // Releases given suballocation, making it free.
5504  // Merges it with adjacent free suballocations if applicable.
5505  // Returns iterator to new free suballocation at this place.
5506  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5507  // Given free suballocation, it inserts it into sorted list of
5508  // m_FreeSuballocationsBySize if it's suitable.
5509  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5510  // Given free suballocation, it removes it from sorted list of
5511  // m_FreeSuballocationsBySize if it's suitable.
5512  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5513 };
5514 
5515 /*
5516 Allocations and their references in internal data structure look like this:
5517 
5518 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5519 
5520  0 +-------+
5521  | |
5522  | |
5523  | |
5524  +-------+
5525  | Alloc | 1st[m_1stNullItemsBeginCount]
5526  +-------+
5527  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5528  +-------+
5529  | ... |
5530  +-------+
5531  | Alloc | 1st[1st.size() - 1]
5532  +-------+
5533  | |
5534  | |
5535  | |
5536 GetSize() +-------+
5537 
5538 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5539 
5540  0 +-------+
5541  | Alloc | 2nd[0]
5542  +-------+
5543  | Alloc | 2nd[1]
5544  +-------+
5545  | ... |
5546  +-------+
5547  | Alloc | 2nd[2nd.size() - 1]
5548  +-------+
5549  | |
5550  | |
5551  | |
5552  +-------+
5553  | Alloc | 1st[m_1stNullItemsBeginCount]
5554  +-------+
5555  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5556  +-------+
5557  | ... |
5558  +-------+
5559  | Alloc | 1st[1st.size() - 1]
5560  +-------+
5561  | |
5562 GetSize() +-------+
5563 
5564 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5565 
5566  0 +-------+
5567  | |
5568  | |
5569  | |
5570  +-------+
5571  | Alloc | 1st[m_1stNullItemsBeginCount]
5572  +-------+
5573  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5574  +-------+
5575  | ... |
5576  +-------+
5577  | Alloc | 1st[1st.size() - 1]
5578  +-------+
5579  | |
5580  | |
5581  | |
5582  +-------+
5583  | Alloc | 2nd[2nd.size() - 1]
5584  +-------+
5585  | ... |
5586  +-------+
5587  | Alloc | 2nd[1]
5588  +-------+
5589  | Alloc | 2nd[0]
5590 GetSize() +-------+
5591 
5592 */
5593 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5594 {
5595  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5596 public:
5597  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5598  virtual ~VmaBlockMetadata_Linear();
5599  virtual void Init(VkDeviceSize size);
5600 
5601  virtual bool Validate() const;
5602  virtual size_t GetAllocationCount() const;
5603  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5604  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5605  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5606 
5607  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5608  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5609 
5610 #if VMA_STATS_STRING_ENABLED
5611  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5612 #endif
5613 
5614  virtual bool CreateAllocationRequest(
5615  uint32_t currentFrameIndex,
5616  uint32_t frameInUseCount,
5617  VkDeviceSize bufferImageGranularity,
5618  VkDeviceSize allocSize,
5619  VkDeviceSize allocAlignment,
5620  bool upperAddress,
5621  VmaSuballocationType allocType,
5622  bool canMakeOtherLost,
5623  uint32_t strategy,
5624  VmaAllocationRequest* pAllocationRequest);
5625 
5626  virtual bool MakeRequestedAllocationsLost(
5627  uint32_t currentFrameIndex,
5628  uint32_t frameInUseCount,
5629  VmaAllocationRequest* pAllocationRequest);
5630 
5631  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5632 
5633  virtual VkResult CheckCorruption(const void* pBlockData);
5634 
5635  virtual void Alloc(
5636  const VmaAllocationRequest& request,
5637  VmaSuballocationType type,
5638  VkDeviceSize allocSize,
5639  VmaAllocation hAllocation);
5640 
5641  virtual void Free(const VmaAllocation allocation);
5642  virtual void FreeAtOffset(VkDeviceSize offset);
5643 
5644 private:
5645  /*
5646  There are two suballocation vectors, used in ping-pong way.
5647  The one with index m_1stVectorIndex is called 1st.
5648  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5649  2nd can be non-empty only when 1st is not empty.
5650  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5651  */
5652  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5653 
5654  enum SECOND_VECTOR_MODE
5655  {
5656  SECOND_VECTOR_EMPTY,
5657  /*
5658  Suballocations in 2nd vector are created later than the ones in 1st, but they
5659  all have smaller offset.
5660  */
5661  SECOND_VECTOR_RING_BUFFER,
5662  /*
5663  Suballocations in 2nd vector are upper side of double stack.
5664  They all have offsets higher than those in 1st vector.
5665  Top of this stack means smaller offsets, but higher indices in this vector.
5666  */
5667  SECOND_VECTOR_DOUBLE_STACK,
5668  };
5669 
5670  VkDeviceSize m_SumFreeSize;
5671  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5672  uint32_t m_1stVectorIndex;
5673  SECOND_VECTOR_MODE m_2ndVectorMode;
5674 
5675  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5676  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5677  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5678  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5679 
5680  // Number of items in 1st vector with hAllocation = null at the beginning.
5681  size_t m_1stNullItemsBeginCount;
5682  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5683  size_t m_1stNullItemsMiddleCount;
5684  // Number of items in 2nd vector with hAllocation = null.
5685  size_t m_2ndNullItemsCount;
5686 
5687  bool ShouldCompact1st() const;
5688  void CleanupAfterFree();
5689 
5690  bool CreateAllocationRequest_LowerAddress(
5691  uint32_t currentFrameIndex,
5692  uint32_t frameInUseCount,
5693  VkDeviceSize bufferImageGranularity,
5694  VkDeviceSize allocSize,
5695  VkDeviceSize allocAlignment,
5696  VmaSuballocationType allocType,
5697  bool canMakeOtherLost,
5698  uint32_t strategy,
5699  VmaAllocationRequest* pAllocationRequest);
5700  bool CreateAllocationRequest_UpperAddress(
5701  uint32_t currentFrameIndex,
5702  uint32_t frameInUseCount,
5703  VkDeviceSize bufferImageGranularity,
5704  VkDeviceSize allocSize,
5705  VkDeviceSize allocAlignment,
5706  VmaSuballocationType allocType,
5707  bool canMakeOtherLost,
5708  uint32_t strategy,
5709  VmaAllocationRequest* pAllocationRequest);
5710 };
5711 
5712 /*
5713 - GetSize() is the original size of allocated memory block.
5714 - m_UsableSize is this size aligned down to a power of two.
5715  All allocations and calculations happen relative to m_UsableSize.
5716 - GetUnusableSize() is the difference between them.
5717  It is repoted as separate, unused range, not available for allocations.
5718 
5719 Node at level 0 has size = m_UsableSize.
5720 Each next level contains nodes with size 2 times smaller than current level.
5721 m_LevelCount is the maximum number of levels to use in the current object.
5722 */
5723 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5724 {
5725  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5726 public:
5727  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5728  virtual ~VmaBlockMetadata_Buddy();
5729  virtual void Init(VkDeviceSize size);
5730 
5731  virtual bool Validate() const;
5732  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5733  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5734  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5735  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5736 
5737  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5738  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5739 
5740 #if VMA_STATS_STRING_ENABLED
5741  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5742 #endif
5743 
5744  virtual bool CreateAllocationRequest(
5745  uint32_t currentFrameIndex,
5746  uint32_t frameInUseCount,
5747  VkDeviceSize bufferImageGranularity,
5748  VkDeviceSize allocSize,
5749  VkDeviceSize allocAlignment,
5750  bool upperAddress,
5751  VmaSuballocationType allocType,
5752  bool canMakeOtherLost,
5753  uint32_t strategy,
5754  VmaAllocationRequest* pAllocationRequest);
5755 
5756  virtual bool MakeRequestedAllocationsLost(
5757  uint32_t currentFrameIndex,
5758  uint32_t frameInUseCount,
5759  VmaAllocationRequest* pAllocationRequest);
5760 
5761  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5762 
5763  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5764 
5765  virtual void Alloc(
5766  const VmaAllocationRequest& request,
5767  VmaSuballocationType type,
5768  VkDeviceSize allocSize,
5769  VmaAllocation hAllocation);
5770 
5771  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5772  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5773 
5774 private:
5775  static const VkDeviceSize MIN_NODE_SIZE = 32;
5776  static const size_t MAX_LEVELS = 30;
5777 
5778  struct ValidationContext
5779  {
5780  size_t calculatedAllocationCount;
5781  size_t calculatedFreeCount;
5782  VkDeviceSize calculatedSumFreeSize;
5783 
5784  ValidationContext() :
5785  calculatedAllocationCount(0),
5786  calculatedFreeCount(0),
5787  calculatedSumFreeSize(0) { }
5788  };
5789 
5790  struct Node
5791  {
5792  VkDeviceSize offset;
5793  enum TYPE
5794  {
5795  TYPE_FREE,
5796  TYPE_ALLOCATION,
5797  TYPE_SPLIT,
5798  TYPE_COUNT
5799  } type;
5800  Node* parent;
5801  Node* buddy;
5802 
5803  union
5804  {
5805  struct
5806  {
5807  Node* prev;
5808  Node* next;
5809  } free;
5810  struct
5811  {
5812  VmaAllocation alloc;
5813  } allocation;
5814  struct
5815  {
5816  Node* leftChild;
5817  } split;
5818  };
5819  };
5820 
5821  // Size of the memory block aligned down to a power of two.
5822  VkDeviceSize m_UsableSize;
5823  uint32_t m_LevelCount;
5824 
5825  Node* m_Root;
5826  struct {
5827  Node* front;
5828  Node* back;
5829  } m_FreeList[MAX_LEVELS];
5830  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5831  size_t m_AllocationCount;
5832  // Number of nodes in the tree with type == TYPE_FREE.
5833  size_t m_FreeCount;
5834  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5835  VkDeviceSize m_SumFreeSize;
5836 
5837  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5838  void DeleteNode(Node* node);
5839  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5840  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5841  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5842  // Alloc passed just for validation. Can be null.
5843  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5844  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5845  // Adds node to the front of FreeList at given level.
5846  // node->type must be FREE.
5847  // node->free.prev, next can be undefined.
5848  void AddToFreeListFront(uint32_t level, Node* node);
5849  // Removes node from FreeList at given level.
5850  // node->type must be FREE.
5851  // node->free.prev, next stay untouched.
5852  void RemoveFromFreeList(uint32_t level, Node* node);
5853 
5854 #if VMA_STATS_STRING_ENABLED
5855  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5856 #endif
5857 };
5858 
5859 /*
5860 Represents a single block of device memory (`VkDeviceMemory`) with all the
5861 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5862 
5863 Thread-safety: This class must be externally synchronized.
5864 */
5865 class VmaDeviceMemoryBlock
5866 {
5867  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5868 public:
5869  VmaBlockMetadata* m_pMetadata;
5870 
5871  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5872 
5873  ~VmaDeviceMemoryBlock()
5874  {
5875  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5876  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5877  }
5878 
5879  // Always call after construction.
5880  void Init(
5881  VmaAllocator hAllocator,
5882  VmaPool hParentPool,
5883  uint32_t newMemoryTypeIndex,
5884  VkDeviceMemory newMemory,
5885  VkDeviceSize newSize,
5886  uint32_t id,
5887  uint32_t algorithm);
5888  // Always call before destruction.
5889  void Destroy(VmaAllocator allocator);
5890 
5891  VmaPool GetParentPool() const { return m_hParentPool; }
5892  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5893  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5894  uint32_t GetId() const { return m_Id; }
5895  void* GetMappedData() const { return m_pMappedData; }
5896 
5897  // Validates all data structures inside this object. If not valid, returns false.
5898  bool Validate() const;
5899 
5900  VkResult CheckCorruption(VmaAllocator hAllocator);
5901 
5902  // ppData can be null.
5903  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5904  void Unmap(VmaAllocator hAllocator, uint32_t count);
5905 
5906  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5907  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5908 
5909  VkResult BindBufferMemory(
5910  const VmaAllocator hAllocator,
5911  const VmaAllocation hAllocation,
5912  VkDeviceSize allocationLocalOffset,
5913  VkBuffer hBuffer,
5914  const void* pNext);
5915  VkResult BindImageMemory(
5916  const VmaAllocator hAllocator,
5917  const VmaAllocation hAllocation,
5918  VkDeviceSize allocationLocalOffset,
5919  VkImage hImage,
5920  const void* pNext);
5921 
5922 private:
5923  VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
5924  uint32_t m_MemoryTypeIndex;
5925  uint32_t m_Id;
5926  VkDeviceMemory m_hMemory;
5927 
5928  /*
5929  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5930  Also protects m_MapCount, m_pMappedData.
5931  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5932  */
5933  VMA_MUTEX m_Mutex;
5934  uint32_t m_MapCount;
5935  void* m_pMappedData;
5936 };
5937 
5938 struct VmaPointerLess
5939 {
5940  bool operator()(const void* lhs, const void* rhs) const
5941  {
5942  return lhs < rhs;
5943  }
5944 };
5945 
5946 struct VmaDefragmentationMove
5947 {
5948  size_t srcBlockIndex;
5949  size_t dstBlockIndex;
5950  VkDeviceSize srcOffset;
5951  VkDeviceSize dstOffset;
5952  VkDeviceSize size;
5953 };
5954 
5955 class VmaDefragmentationAlgorithm;
5956 
5957 /*
5958 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5959 Vulkan memory type.
5960 
5961 Synchronized internally with a mutex.
5962 */
5963 struct VmaBlockVector
5964 {
5965  VMA_CLASS_NO_COPY(VmaBlockVector)
5966 public:
5967  VmaBlockVector(
5968  VmaAllocator hAllocator,
5969  VmaPool hParentPool,
5970  uint32_t memoryTypeIndex,
5971  VkDeviceSize preferredBlockSize,
5972  size_t minBlockCount,
5973  size_t maxBlockCount,
5974  VkDeviceSize bufferImageGranularity,
5975  uint32_t frameInUseCount,
5976  bool isCustomPool,
5977  bool explicitBlockSize,
5978  uint32_t algorithm);
5979  ~VmaBlockVector();
5980 
5981  VkResult CreateMinBlocks();
5982 
5983  VmaPool GetParentPool() const { return m_hParentPool; }
5984  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5985  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5986  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5987  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5988  uint32_t GetAlgorithm() const { return m_Algorithm; }
5989 
5990  void GetPoolStats(VmaPoolStats* pStats);
5991 
5992  bool IsEmpty() const { return m_Blocks.empty(); }
5993  bool IsCorruptionDetectionEnabled() const;
5994 
5995  VkResult Allocate(
5996  uint32_t currentFrameIndex,
5997  VkDeviceSize size,
5998  VkDeviceSize alignment,
5999  const VmaAllocationCreateInfo& createInfo,
6000  VmaSuballocationType suballocType,
6001  size_t allocationCount,
6002  VmaAllocation* pAllocations);
6003 
6004  void Free(
6005  VmaAllocation hAllocation);
6006 
6007  // Adds statistics of this BlockVector to pStats.
6008  void AddStats(VmaStats* pStats);
6009 
6010 #if VMA_STATS_STRING_ENABLED
6011  void PrintDetailedMap(class VmaJsonWriter& json);
6012 #endif
6013 
6014  void MakePoolAllocationsLost(
6015  uint32_t currentFrameIndex,
6016  size_t* pLostAllocationCount);
6017  VkResult CheckCorruption();
6018 
6019  // Saves results in pCtx->res.
6020  void Defragment(
6021  class VmaBlockVectorDefragmentationContext* pCtx,
6022  VmaDefragmentationStats* pStats,
6023  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6024  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6025  VkCommandBuffer commandBuffer);
6026  void DefragmentationEnd(
6027  class VmaBlockVectorDefragmentationContext* pCtx,
6028  VmaDefragmentationStats* pStats);
6029 
6031  // To be used only while the m_Mutex is locked. Used during defragmentation.
6032 
6033  size_t GetBlockCount() const { return m_Blocks.size(); }
6034  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6035  size_t CalcAllocationCount() const;
6036  bool IsBufferImageGranularityConflictPossible() const;
6037 
6038 private:
6039  friend class VmaDefragmentationAlgorithm_Generic;
6040 
6041  const VmaAllocator m_hAllocator;
6042  const VmaPool m_hParentPool;
6043  const uint32_t m_MemoryTypeIndex;
6044  const VkDeviceSize m_PreferredBlockSize;
6045  const size_t m_MinBlockCount;
6046  const size_t m_MaxBlockCount;
6047  const VkDeviceSize m_BufferImageGranularity;
6048  const uint32_t m_FrameInUseCount;
6049  const bool m_IsCustomPool;
6050  const bool m_ExplicitBlockSize;
6051  const uint32_t m_Algorithm;
6052  /* There can be at most one allocation that is completely empty - a
6053  hysteresis to avoid pessimistic case of alternating creation and destruction
6054  of a VkDeviceMemory. */
6055  bool m_HasEmptyBlock;
6056  VMA_RW_MUTEX m_Mutex;
6057  // Incrementally sorted by sumFreeSize, ascending.
6058  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6059  uint32_t m_NextBlockId;
6060 
6061  VkDeviceSize CalcMaxBlockSize() const;
6062 
6063  // Finds and removes given block from vector.
6064  void Remove(VmaDeviceMemoryBlock* pBlock);
6065 
6066  // Performs single step in sorting m_Blocks. They may not be fully sorted
6067  // after this call.
6068  void IncrementallySortBlocks();
6069 
6070  VkResult AllocatePage(
6071  uint32_t currentFrameIndex,
6072  VkDeviceSize size,
6073  VkDeviceSize alignment,
6074  const VmaAllocationCreateInfo& createInfo,
6075  VmaSuballocationType suballocType,
6076  VmaAllocation* pAllocation);
6077 
6078  // To be used only without CAN_MAKE_OTHER_LOST flag.
6079  VkResult AllocateFromBlock(
6080  VmaDeviceMemoryBlock* pBlock,
6081  uint32_t currentFrameIndex,
6082  VkDeviceSize size,
6083  VkDeviceSize alignment,
6084  VmaAllocationCreateFlags allocFlags,
6085  void* pUserData,
6086  VmaSuballocationType suballocType,
6087  uint32_t strategy,
6088  VmaAllocation* pAllocation);
6089 
6090  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6091 
6092  // Saves result to pCtx->res.
6093  void ApplyDefragmentationMovesCpu(
6094  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6095  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6096  // Saves result to pCtx->res.
6097  void ApplyDefragmentationMovesGpu(
6098  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6099  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6100  VkCommandBuffer commandBuffer);
6101 
6102  /*
6103  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6104  - updated with new data.
6105  */
6106  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6107 };
6108 
6109 struct VmaPool_T
6110 {
6111  VMA_CLASS_NO_COPY(VmaPool_T)
6112 public:
6113  VmaBlockVector m_BlockVector;
6114 
6115  VmaPool_T(
6116  VmaAllocator hAllocator,
6117  const VmaPoolCreateInfo& createInfo,
6118  VkDeviceSize preferredBlockSize);
6119  ~VmaPool_T();
6120 
6121  uint32_t GetId() const { return m_Id; }
6122  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6123 
6124 #if VMA_STATS_STRING_ENABLED
6125  //void PrintDetailedMap(class VmaStringBuilder& sb);
6126 #endif
6127 
6128 private:
6129  uint32_t m_Id;
6130 };
6131 
6132 /*
6133 Performs defragmentation:
6134 
6135 - Updates `pBlockVector->m_pMetadata`.
6136 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6137 - Does not move actual data, only returns requested moves as `moves`.
6138 */
6139 class VmaDefragmentationAlgorithm
6140 {
6141  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6142 public:
6143  VmaDefragmentationAlgorithm(
6144  VmaAllocator hAllocator,
6145  VmaBlockVector* pBlockVector,
6146  uint32_t currentFrameIndex) :
6147  m_hAllocator(hAllocator),
6148  m_pBlockVector(pBlockVector),
6149  m_CurrentFrameIndex(currentFrameIndex)
6150  {
6151  }
6152  virtual ~VmaDefragmentationAlgorithm()
6153  {
6154  }
6155 
6156  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6157  virtual void AddAll() = 0;
6158 
6159  virtual VkResult Defragment(
6160  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6161  VkDeviceSize maxBytesToMove,
6162  uint32_t maxAllocationsToMove) = 0;
6163 
6164  virtual VkDeviceSize GetBytesMoved() const = 0;
6165  virtual uint32_t GetAllocationsMoved() const = 0;
6166 
6167 protected:
6168  VmaAllocator const m_hAllocator;
6169  VmaBlockVector* const m_pBlockVector;
6170  const uint32_t m_CurrentFrameIndex;
6171 
6172  struct AllocationInfo
6173  {
6174  VmaAllocation m_hAllocation;
6175  VkBool32* m_pChanged;
6176 
6177  AllocationInfo() :
6178  m_hAllocation(VK_NULL_HANDLE),
6179  m_pChanged(VMA_NULL)
6180  {
6181  }
6182  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6183  m_hAllocation(hAlloc),
6184  m_pChanged(pChanged)
6185  {
6186  }
6187  };
6188 };
6189 
6190 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6191 {
6192  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6193 public:
6194  VmaDefragmentationAlgorithm_Generic(
6195  VmaAllocator hAllocator,
6196  VmaBlockVector* pBlockVector,
6197  uint32_t currentFrameIndex,
6198  bool overlappingMoveSupported);
6199  virtual ~VmaDefragmentationAlgorithm_Generic();
6200 
6201  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6202  virtual void AddAll() { m_AllAllocations = true; }
6203 
6204  virtual VkResult Defragment(
6205  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6206  VkDeviceSize maxBytesToMove,
6207  uint32_t maxAllocationsToMove);
6208 
6209  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6210  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6211 
6212 private:
6213  uint32_t m_AllocationCount;
6214  bool m_AllAllocations;
6215 
6216  VkDeviceSize m_BytesMoved;
6217  uint32_t m_AllocationsMoved;
6218 
6219  struct AllocationInfoSizeGreater
6220  {
6221  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6222  {
6223  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6224  }
6225  };
6226 
6227  struct AllocationInfoOffsetGreater
6228  {
6229  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6230  {
6231  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6232  }
6233  };
6234 
6235  struct BlockInfo
6236  {
6237  size_t m_OriginalBlockIndex;
6238  VmaDeviceMemoryBlock* m_pBlock;
6239  bool m_HasNonMovableAllocations;
6240  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6241 
6242  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6243  m_OriginalBlockIndex(SIZE_MAX),
6244  m_pBlock(VMA_NULL),
6245  m_HasNonMovableAllocations(true),
6246  m_Allocations(pAllocationCallbacks)
6247  {
6248  }
6249 
6250  void CalcHasNonMovableAllocations()
6251  {
6252  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6253  const size_t defragmentAllocCount = m_Allocations.size();
6254  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6255  }
6256 
6257  void SortAllocationsBySizeDescending()
6258  {
6259  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6260  }
6261 
6262  void SortAllocationsByOffsetDescending()
6263  {
6264  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6265  }
6266  };
6267 
6268  struct BlockPointerLess
6269  {
6270  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6271  {
6272  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6273  }
6274  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6275  {
6276  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6277  }
6278  };
6279 
6280  // 1. Blocks with some non-movable allocations go first.
6281  // 2. Blocks with smaller sumFreeSize go first.
6282  struct BlockInfoCompareMoveDestination
6283  {
6284  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6285  {
6286  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6287  {
6288  return true;
6289  }
6290  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6291  {
6292  return false;
6293  }
6294  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6295  {
6296  return true;
6297  }
6298  return false;
6299  }
6300  };
6301 
6302  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6303  BlockInfoVector m_Blocks;
6304 
6305  VkResult DefragmentRound(
6306  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6307  VkDeviceSize maxBytesToMove,
6308  uint32_t maxAllocationsToMove);
6309 
6310  size_t CalcBlocksWithNonMovableCount() const;
6311 
6312  static bool MoveMakesSense(
6313  size_t dstBlockIndex, VkDeviceSize dstOffset,
6314  size_t srcBlockIndex, VkDeviceSize srcOffset);
6315 };
6316 
6317 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6318 {
6319  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6320 public:
6321  VmaDefragmentationAlgorithm_Fast(
6322  VmaAllocator hAllocator,
6323  VmaBlockVector* pBlockVector,
6324  uint32_t currentFrameIndex,
6325  bool overlappingMoveSupported);
6326  virtual ~VmaDefragmentationAlgorithm_Fast();
6327 
6328  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6329  virtual void AddAll() { m_AllAllocations = true; }
6330 
6331  virtual VkResult Defragment(
6332  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6333  VkDeviceSize maxBytesToMove,
6334  uint32_t maxAllocationsToMove);
6335 
6336  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6337  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6338 
6339 private:
6340  struct BlockInfo
6341  {
6342  size_t origBlockIndex;
6343  };
6344 
6345  class FreeSpaceDatabase
6346  {
6347  public:
6348  FreeSpaceDatabase()
6349  {
6350  FreeSpace s = {};
6351  s.blockInfoIndex = SIZE_MAX;
6352  for(size_t i = 0; i < MAX_COUNT; ++i)
6353  {
6354  m_FreeSpaces[i] = s;
6355  }
6356  }
6357 
6358  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6359  {
6360  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6361  {
6362  return;
6363  }
6364 
6365  // Find first invalid or the smallest structure.
6366  size_t bestIndex = SIZE_MAX;
6367  for(size_t i = 0; i < MAX_COUNT; ++i)
6368  {
6369  // Empty structure.
6370  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6371  {
6372  bestIndex = i;
6373  break;
6374  }
6375  if(m_FreeSpaces[i].size < size &&
6376  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6377  {
6378  bestIndex = i;
6379  }
6380  }
6381 
6382  if(bestIndex != SIZE_MAX)
6383  {
6384  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6385  m_FreeSpaces[bestIndex].offset = offset;
6386  m_FreeSpaces[bestIndex].size = size;
6387  }
6388  }
6389 
6390  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6391  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6392  {
6393  size_t bestIndex = SIZE_MAX;
6394  VkDeviceSize bestFreeSpaceAfter = 0;
6395  for(size_t i = 0; i < MAX_COUNT; ++i)
6396  {
6397  // Structure is valid.
6398  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6399  {
6400  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6401  // Allocation fits into this structure.
6402  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6403  {
6404  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6405  (dstOffset + size);
6406  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6407  {
6408  bestIndex = i;
6409  bestFreeSpaceAfter = freeSpaceAfter;
6410  }
6411  }
6412  }
6413  }
6414 
6415  if(bestIndex != SIZE_MAX)
6416  {
6417  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6418  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6419 
6420  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6421  {
6422  // Leave this structure for remaining empty space.
6423  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6424  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6425  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6426  }
6427  else
6428  {
6429  // This structure becomes invalid.
6430  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6431  }
6432 
6433  return true;
6434  }
6435 
6436  return false;
6437  }
6438 
6439  private:
6440  static const size_t MAX_COUNT = 4;
6441 
6442  struct FreeSpace
6443  {
6444  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6445  VkDeviceSize offset;
6446  VkDeviceSize size;
6447  } m_FreeSpaces[MAX_COUNT];
6448  };
6449 
6450  const bool m_OverlappingMoveSupported;
6451 
6452  uint32_t m_AllocationCount;
6453  bool m_AllAllocations;
6454 
6455  VkDeviceSize m_BytesMoved;
6456  uint32_t m_AllocationsMoved;
6457 
6458  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6459 
6460  void PreprocessMetadata();
6461  void PostprocessMetadata();
6462  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6463 };
6464 
6465 struct VmaBlockDefragmentationContext
6466 {
6467  enum BLOCK_FLAG
6468  {
6469  BLOCK_FLAG_USED = 0x00000001,
6470  };
6471  uint32_t flags;
6472  VkBuffer hBuffer;
6473 };
6474 
6475 class VmaBlockVectorDefragmentationContext
6476 {
6477  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6478 public:
6479  VkResult res;
6480  bool mutexLocked;
6481  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6482 
6483  VmaBlockVectorDefragmentationContext(
6484  VmaAllocator hAllocator,
6485  VmaPool hCustomPool, // Optional.
6486  VmaBlockVector* pBlockVector,
6487  uint32_t currFrameIndex);
6488  ~VmaBlockVectorDefragmentationContext();
6489 
6490  VmaPool GetCustomPool() const { return m_hCustomPool; }
6491  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6492  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6493 
6494  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6495  void AddAll() { m_AllAllocations = true; }
6496 
6497  void Begin(bool overlappingMoveSupported);
6498 
6499 private:
6500  const VmaAllocator m_hAllocator;
6501  // Null if not from custom pool.
6502  const VmaPool m_hCustomPool;
6503  // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6504  VmaBlockVector* const m_pBlockVector;
6505  const uint32_t m_CurrFrameIndex;
6506  // Owner of this object.
6507  VmaDefragmentationAlgorithm* m_pAlgorithm;
6508 
6509  struct AllocInfo
6510  {
6511  VmaAllocation hAlloc;
6512  VkBool32* pChanged;
6513  };
6514  // Used between constructor and Begin.
6515  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6516  bool m_AllAllocations;
6517 };
6518 
6519 struct VmaDefragmentationContext_T
6520 {
6521 private:
6522  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6523 public:
6524  VmaDefragmentationContext_T(
6525  VmaAllocator hAllocator,
6526  uint32_t currFrameIndex,
6527  uint32_t flags,
6528  VmaDefragmentationStats* pStats);
6529  ~VmaDefragmentationContext_T();
6530 
6531  void AddPools(uint32_t poolCount, VmaPool* pPools);
6532  void AddAllocations(
6533  uint32_t allocationCount,
6534  VmaAllocation* pAllocations,
6535  VkBool32* pAllocationsChanged);
6536 
6537  /*
6538  Returns:
6539  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6540  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6541  - Negative value if error occured and object can be destroyed immediately.
6542  */
6543  VkResult Defragment(
6544  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6545  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6546  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6547 
6548 private:
6549  const VmaAllocator m_hAllocator;
6550  const uint32_t m_CurrFrameIndex;
6551  const uint32_t m_Flags;
6552  VmaDefragmentationStats* const m_pStats;
6553  // Owner of these objects.
6554  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6555  // Owner of these objects.
6556  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6557 };
6558 
6559 #if VMA_RECORDING_ENABLED
6560 
6561 class VmaRecorder
6562 {
6563 public:
6564  VmaRecorder();
6565  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6566  void WriteConfiguration(
6567  const VkPhysicalDeviceProperties& devProps,
6568  const VkPhysicalDeviceMemoryProperties& memProps,
6569  bool dedicatedAllocationExtensionEnabled,
6570  bool bindMemory2ExtensionEnabled);
6571  ~VmaRecorder();
6572 
6573  void RecordCreateAllocator(uint32_t frameIndex);
6574  void RecordDestroyAllocator(uint32_t frameIndex);
6575  void RecordCreatePool(uint32_t frameIndex,
6576  const VmaPoolCreateInfo& createInfo,
6577  VmaPool pool);
6578  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6579  void RecordAllocateMemory(uint32_t frameIndex,
6580  const VkMemoryRequirements& vkMemReq,
6581  const VmaAllocationCreateInfo& createInfo,
6582  VmaAllocation allocation);
6583  void RecordAllocateMemoryPages(uint32_t frameIndex,
6584  const VkMemoryRequirements& vkMemReq,
6585  const VmaAllocationCreateInfo& createInfo,
6586  uint64_t allocationCount,
6587  const VmaAllocation* pAllocations);
6588  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6589  const VkMemoryRequirements& vkMemReq,
6590  bool requiresDedicatedAllocation,
6591  bool prefersDedicatedAllocation,
6592  const VmaAllocationCreateInfo& createInfo,
6593  VmaAllocation allocation);
6594  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6595  const VkMemoryRequirements& vkMemReq,
6596  bool requiresDedicatedAllocation,
6597  bool prefersDedicatedAllocation,
6598  const VmaAllocationCreateInfo& createInfo,
6599  VmaAllocation allocation);
6600  void RecordFreeMemory(uint32_t frameIndex,
6601  VmaAllocation allocation);
6602  void RecordFreeMemoryPages(uint32_t frameIndex,
6603  uint64_t allocationCount,
6604  const VmaAllocation* pAllocations);
6605  void RecordSetAllocationUserData(uint32_t frameIndex,
6606  VmaAllocation allocation,
6607  const void* pUserData);
6608  void RecordCreateLostAllocation(uint32_t frameIndex,
6609  VmaAllocation allocation);
6610  void RecordMapMemory(uint32_t frameIndex,
6611  VmaAllocation allocation);
6612  void RecordUnmapMemory(uint32_t frameIndex,
6613  VmaAllocation allocation);
6614  void RecordFlushAllocation(uint32_t frameIndex,
6615  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6616  void RecordInvalidateAllocation(uint32_t frameIndex,
6617  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6618  void RecordCreateBuffer(uint32_t frameIndex,
6619  const VkBufferCreateInfo& bufCreateInfo,
6620  const VmaAllocationCreateInfo& allocCreateInfo,
6621  VmaAllocation allocation);
6622  void RecordCreateImage(uint32_t frameIndex,
6623  const VkImageCreateInfo& imageCreateInfo,
6624  const VmaAllocationCreateInfo& allocCreateInfo,
6625  VmaAllocation allocation);
6626  void RecordDestroyBuffer(uint32_t frameIndex,
6627  VmaAllocation allocation);
6628  void RecordDestroyImage(uint32_t frameIndex,
6629  VmaAllocation allocation);
6630  void RecordTouchAllocation(uint32_t frameIndex,
6631  VmaAllocation allocation);
6632  void RecordGetAllocationInfo(uint32_t frameIndex,
6633  VmaAllocation allocation);
6634  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6635  VmaPool pool);
6636  void RecordDefragmentationBegin(uint32_t frameIndex,
6637  const VmaDefragmentationInfo2& info,
6639  void RecordDefragmentationEnd(uint32_t frameIndex,
6641 
6642 private:
6643  struct CallParams
6644  {
6645  uint32_t threadId;
6646  double time;
6647  };
6648 
6649  class UserDataString
6650  {
6651  public:
6652  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6653  const char* GetString() const { return m_Str; }
6654 
6655  private:
6656  char m_PtrStr[17];
6657  const char* m_Str;
6658  };
6659 
6660  bool m_UseMutex;
6661  VmaRecordFlags m_Flags;
6662  FILE* m_File;
6663  VMA_MUTEX m_FileMutex;
6664  int64_t m_Freq;
6665  int64_t m_StartCounter;
6666 
6667  void GetBasicParams(CallParams& outParams);
6668 
6669  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6670  template<typename T>
6671  void PrintPointerList(uint64_t count, const T* pItems)
6672  {
6673  if(count)
6674  {
6675  fprintf(m_File, "%p", pItems[0]);
6676  for(uint64_t i = 1; i < count; ++i)
6677  {
6678  fprintf(m_File, " %p", pItems[i]);
6679  }
6680  }
6681  }
6682 
6683  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6684  void Flush();
6685 };
6686 
6687 #endif // #if VMA_RECORDING_ENABLED
6688 
6689 /*
6690 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6691 */
6692 class VmaAllocationObjectAllocator
6693 {
6694  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6695 public:
6696  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6697 
6698  VmaAllocation Allocate();
6699  void Free(VmaAllocation hAlloc);
6700 
6701 private:
6702  VMA_MUTEX m_Mutex;
6703  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6704 };
6705 
6706 // Main allocator object.
6707 struct VmaAllocator_T
6708 {
6709  VMA_CLASS_NO_COPY(VmaAllocator_T)
6710 public:
6711  bool m_UseMutex;
6712  bool m_UseKhrDedicatedAllocation;
6713  bool m_UseKhrBindMemory2;
6714  VkDevice m_hDevice;
6715  bool m_AllocationCallbacksSpecified;
6716  VkAllocationCallbacks m_AllocationCallbacks;
6717  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6718  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6719 
6720  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
6721  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6722  VMA_MUTEX m_HeapSizeLimitMutex;
6723 
6724  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6725  VkPhysicalDeviceMemoryProperties m_MemProps;
6726 
6727  // Default pools.
6728  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6729 
6730  // Each vector is sorted by memory (handle value).
6731  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6732  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6733  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6734 
6735  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6736  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6737  ~VmaAllocator_T();
6738 
6739  const VkAllocationCallbacks* GetAllocationCallbacks() const
6740  {
6741  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6742  }
6743  const VmaVulkanFunctions& GetVulkanFunctions() const
6744  {
6745  return m_VulkanFunctions;
6746  }
6747 
6748  VkDeviceSize GetBufferImageGranularity() const
6749  {
6750  return VMA_MAX(
6751  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6752  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6753  }
6754 
6755  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6756  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6757 
6758  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6759  {
6760  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6761  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6762  }
6763  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6764  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6765  {
6766  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6767  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6768  }
6769  // Minimum alignment for all allocations in specific memory type.
6770  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6771  {
6772  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6773  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6774  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6775  }
6776 
6777  bool IsIntegratedGpu() const
6778  {
6779  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6780  }
6781 
6782 #if VMA_RECORDING_ENABLED
6783  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6784 #endif
6785 
6786  void GetBufferMemoryRequirements(
6787  VkBuffer hBuffer,
6788  VkMemoryRequirements& memReq,
6789  bool& requiresDedicatedAllocation,
6790  bool& prefersDedicatedAllocation) const;
6791  void GetImageMemoryRequirements(
6792  VkImage hImage,
6793  VkMemoryRequirements& memReq,
6794  bool& requiresDedicatedAllocation,
6795  bool& prefersDedicatedAllocation) const;
6796 
6797  // Main allocation function.
6798  VkResult AllocateMemory(
6799  const VkMemoryRequirements& vkMemReq,
6800  bool requiresDedicatedAllocation,
6801  bool prefersDedicatedAllocation,
6802  VkBuffer dedicatedBuffer,
6803  VkImage dedicatedImage,
6804  const VmaAllocationCreateInfo& createInfo,
6805  VmaSuballocationType suballocType,
6806  size_t allocationCount,
6807  VmaAllocation* pAllocations);
6808 
6809  // Main deallocation function.
6810  void FreeMemory(
6811  size_t allocationCount,
6812  const VmaAllocation* pAllocations);
6813 
6814  VkResult ResizeAllocation(
6815  const VmaAllocation alloc,
6816  VkDeviceSize newSize);
6817 
6818  void CalculateStats(VmaStats* pStats);
6819 
6820 #if VMA_STATS_STRING_ENABLED
6821  void PrintDetailedMap(class VmaJsonWriter& json);
6822 #endif
6823 
6824  VkResult DefragmentationBegin(
6825  const VmaDefragmentationInfo2& info,
6826  VmaDefragmentationStats* pStats,
6827  VmaDefragmentationContext* pContext);
6828  VkResult DefragmentationEnd(
6829  VmaDefragmentationContext context);
6830 
6831  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6832  bool TouchAllocation(VmaAllocation hAllocation);
6833 
6834  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6835  void DestroyPool(VmaPool pool);
6836  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6837 
6838  void SetCurrentFrameIndex(uint32_t frameIndex);
6839  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6840 
6841  void MakePoolAllocationsLost(
6842  VmaPool hPool,
6843  size_t* pLostAllocationCount);
6844  VkResult CheckPoolCorruption(VmaPool hPool);
6845  VkResult CheckCorruption(uint32_t memoryTypeBits);
6846 
6847  void CreateLostAllocation(VmaAllocation* pAllocation);
6848 
6849  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
6850  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6851  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
6852  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6853  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
6854  VkResult BindVulkanBuffer(
6855  VkDeviceMemory memory,
6856  VkDeviceSize memoryOffset,
6857  VkBuffer buffer,
6858  const void* pNext);
6859  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
6860  VkResult BindVulkanImage(
6861  VkDeviceMemory memory,
6862  VkDeviceSize memoryOffset,
6863  VkImage image,
6864  const void* pNext);
6865 
6866  VkResult Map(VmaAllocation hAllocation, void** ppData);
6867  void Unmap(VmaAllocation hAllocation);
6868 
6869  VkResult BindBufferMemory(
6870  VmaAllocation hAllocation,
6871  VkDeviceSize allocationLocalOffset,
6872  VkBuffer hBuffer,
6873  const void* pNext);
6874  VkResult BindImageMemory(
6875  VmaAllocation hAllocation,
6876  VkDeviceSize allocationLocalOffset,
6877  VkImage hImage,
6878  const void* pNext);
6879 
6880  void FlushOrInvalidateAllocation(
6881  VmaAllocation hAllocation,
6882  VkDeviceSize offset, VkDeviceSize size,
6883  VMA_CACHE_OPERATION op);
6884 
6885  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6886 
6887  /*
6888  Returns bit mask of memory types that can support defragmentation on GPU as
6889  they support creation of required buffer for copy operations.
6890  */
6891  uint32_t GetGpuDefragmentationMemoryTypeBits();
6892 
6893 private:
6894  VkDeviceSize m_PreferredLargeHeapBlockSize;
6895 
6896  VkPhysicalDevice m_PhysicalDevice;
6897  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6898  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
6899 
6900  VMA_RW_MUTEX m_PoolsMutex;
6901  // Protected by m_PoolsMutex. Sorted by pointer value.
6902  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6903  uint32_t m_NextPoolId;
6904 
6905  VmaVulkanFunctions m_VulkanFunctions;
6906 
6907 #if VMA_RECORDING_ENABLED
6908  VmaRecorder* m_pRecorder;
6909 #endif
6910 
6911  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6912 
6913  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6914 
6915  VkResult AllocateMemoryOfType(
6916  VkDeviceSize size,
6917  VkDeviceSize alignment,
6918  bool dedicatedAllocation,
6919  VkBuffer dedicatedBuffer,
6920  VkImage dedicatedImage,
6921  const VmaAllocationCreateInfo& createInfo,
6922  uint32_t memTypeIndex,
6923  VmaSuballocationType suballocType,
6924  size_t allocationCount,
6925  VmaAllocation* pAllocations);
6926 
6927  // Helper function only to be used inside AllocateDedicatedMemory.
6928  VkResult AllocateDedicatedMemoryPage(
6929  VkDeviceSize size,
6930  VmaSuballocationType suballocType,
6931  uint32_t memTypeIndex,
6932  const VkMemoryAllocateInfo& allocInfo,
6933  bool map,
6934  bool isUserDataString,
6935  void* pUserData,
6936  VmaAllocation* pAllocation);
6937 
6938  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6939  VkResult AllocateDedicatedMemory(
6940  VkDeviceSize size,
6941  VmaSuballocationType suballocType,
6942  uint32_t memTypeIndex,
6943  bool map,
6944  bool isUserDataString,
6945  void* pUserData,
6946  VkBuffer dedicatedBuffer,
6947  VkImage dedicatedImage,
6948  size_t allocationCount,
6949  VmaAllocation* pAllocations);
6950 
6951  void FreeDedicatedMemory(VmaAllocation allocation);
6952 
6953  /*
6954  Calculates and returns bit mask of memory types that can support defragmentation
6955  on GPU as they support creation of required buffer for copy operations.
6956  */
6957  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
6958 };
6959 
6961 // Memory allocation #2 after VmaAllocator_T definition
6962 
6963 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6964 {
6965  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6966 }
6967 
6968 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6969 {
6970  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6971 }
6972 
6973 template<typename T>
6974 static T* VmaAllocate(VmaAllocator hAllocator)
6975 {
6976  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6977 }
6978 
6979 template<typename T>
6980 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6981 {
6982  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6983 }
6984 
6985 template<typename T>
6986 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6987 {
6988  if(ptr != VMA_NULL)
6989  {
6990  ptr->~T();
6991  VmaFree(hAllocator, ptr);
6992  }
6993 }
6994 
6995 template<typename T>
6996 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6997 {
6998  if(ptr != VMA_NULL)
6999  {
7000  for(size_t i = count; i--; )
7001  ptr[i].~T();
7002  VmaFree(hAllocator, ptr);
7003  }
7004 }
7005 
7007 // VmaStringBuilder
7008 
7009 #if VMA_STATS_STRING_ENABLED
7010 
7011 class VmaStringBuilder
7012 {
7013 public:
7014  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7015  size_t GetLength() const { return m_Data.size(); }
7016  const char* GetData() const { return m_Data.data(); }
7017 
7018  void Add(char ch) { m_Data.push_back(ch); }
7019  void Add(const char* pStr);
7020  void AddNewLine() { Add('\n'); }
7021  void AddNumber(uint32_t num);
7022  void AddNumber(uint64_t num);
7023  void AddPointer(const void* ptr);
7024 
7025 private:
7026  VmaVector< char, VmaStlAllocator<char> > m_Data;
7027 };
7028 
7029 void VmaStringBuilder::Add(const char* pStr)
7030 {
7031  const size_t strLen = strlen(pStr);
7032  if(strLen > 0)
7033  {
7034  const size_t oldCount = m_Data.size();
7035  m_Data.resize(oldCount + strLen);
7036  memcpy(m_Data.data() + oldCount, pStr, strLen);
7037  }
7038 }
7039 
7040 void VmaStringBuilder::AddNumber(uint32_t num)
7041 {
7042  char buf[11];
7043  VmaUint32ToStr(buf, sizeof(buf), num);
7044  Add(buf);
7045 }
7046 
7047 void VmaStringBuilder::AddNumber(uint64_t num)
7048 {
7049  char buf[21];
7050  VmaUint64ToStr(buf, sizeof(buf), num);
7051  Add(buf);
7052 }
7053 
7054 void VmaStringBuilder::AddPointer(const void* ptr)
7055 {
7056  char buf[21];
7057  VmaPtrToStr(buf, sizeof(buf), ptr);
7058  Add(buf);
7059 }
7060 
7061 #endif // #if VMA_STATS_STRING_ENABLED
7062 
7064 // VmaJsonWriter
7065 
7066 #if VMA_STATS_STRING_ENABLED
7067 
7068 class VmaJsonWriter
7069 {
7070  VMA_CLASS_NO_COPY(VmaJsonWriter)
7071 public:
7072  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7073  ~VmaJsonWriter();
7074 
7075  void BeginObject(bool singleLine = false);
7076  void EndObject();
7077 
7078  void BeginArray(bool singleLine = false);
7079  void EndArray();
7080 
7081  void WriteString(const char* pStr);
7082  void BeginString(const char* pStr = VMA_NULL);
7083  void ContinueString(const char* pStr);
7084  void ContinueString(uint32_t n);
7085  void ContinueString(uint64_t n);
7086  void ContinueString_Pointer(const void* ptr);
7087  void EndString(const char* pStr = VMA_NULL);
7088 
7089  void WriteNumber(uint32_t n);
7090  void WriteNumber(uint64_t n);
7091  void WriteBool(bool b);
7092  void WriteNull();
7093 
7094 private:
7095  static const char* const INDENT;
7096 
7097  enum COLLECTION_TYPE
7098  {
7099  COLLECTION_TYPE_OBJECT,
7100  COLLECTION_TYPE_ARRAY,
7101  };
7102  struct StackItem
7103  {
7104  COLLECTION_TYPE type;
7105  uint32_t valueCount;
7106  bool singleLineMode;
7107  };
7108 
7109  VmaStringBuilder& m_SB;
7110  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7111  bool m_InsideString;
7112 
7113  void BeginValue(bool isString);
7114  void WriteIndent(bool oneLess = false);
7115 };
7116 
7117 const char* const VmaJsonWriter::INDENT = " ";
7118 
7119 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7120  m_SB(sb),
7121  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7122  m_InsideString(false)
7123 {
7124 }
7125 
7126 VmaJsonWriter::~VmaJsonWriter()
7127 {
7128  VMA_ASSERT(!m_InsideString);
7129  VMA_ASSERT(m_Stack.empty());
7130 }
7131 
7132 void VmaJsonWriter::BeginObject(bool singleLine)
7133 {
7134  VMA_ASSERT(!m_InsideString);
7135 
7136  BeginValue(false);
7137  m_SB.Add('{');
7138 
7139  StackItem item;
7140  item.type = COLLECTION_TYPE_OBJECT;
7141  item.valueCount = 0;
7142  item.singleLineMode = singleLine;
7143  m_Stack.push_back(item);
7144 }
7145 
7146 void VmaJsonWriter::EndObject()
7147 {
7148  VMA_ASSERT(!m_InsideString);
7149 
7150  WriteIndent(true);
7151  m_SB.Add('}');
7152 
7153  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7154  m_Stack.pop_back();
7155 }
7156 
7157 void VmaJsonWriter::BeginArray(bool singleLine)
7158 {
7159  VMA_ASSERT(!m_InsideString);
7160 
7161  BeginValue(false);
7162  m_SB.Add('[');
7163 
7164  StackItem item;
7165  item.type = COLLECTION_TYPE_ARRAY;
7166  item.valueCount = 0;
7167  item.singleLineMode = singleLine;
7168  m_Stack.push_back(item);
7169 }
7170 
7171 void VmaJsonWriter::EndArray()
7172 {
7173  VMA_ASSERT(!m_InsideString);
7174 
7175  WriteIndent(true);
7176  m_SB.Add(']');
7177 
7178  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7179  m_Stack.pop_back();
7180 }
7181 
7182 void VmaJsonWriter::WriteString(const char* pStr)
7183 {
7184  BeginString(pStr);
7185  EndString();
7186 }
7187 
7188 void VmaJsonWriter::BeginString(const char* pStr)
7189 {
7190  VMA_ASSERT(!m_InsideString);
7191 
7192  BeginValue(true);
7193  m_SB.Add('"');
7194  m_InsideString = true;
7195  if(pStr != VMA_NULL && pStr[0] != '\0')
7196  {
7197  ContinueString(pStr);
7198  }
7199 }
7200 
7201 void VmaJsonWriter::ContinueString(const char* pStr)
7202 {
7203  VMA_ASSERT(m_InsideString);
7204 
7205  const size_t strLen = strlen(pStr);
7206  for(size_t i = 0; i < strLen; ++i)
7207  {
7208  char ch = pStr[i];
7209  if(ch == '\\')
7210  {
7211  m_SB.Add("\\\\");
7212  }
7213  else if(ch == '"')
7214  {
7215  m_SB.Add("\\\"");
7216  }
7217  else if(ch >= 32)
7218  {
7219  m_SB.Add(ch);
7220  }
7221  else switch(ch)
7222  {
7223  case '\b':
7224  m_SB.Add("\\b");
7225  break;
7226  case '\f':
7227  m_SB.Add("\\f");
7228  break;
7229  case '\n':
7230  m_SB.Add("\\n");
7231  break;
7232  case '\r':
7233  m_SB.Add("\\r");
7234  break;
7235  case '\t':
7236  m_SB.Add("\\t");
7237  break;
7238  default:
7239  VMA_ASSERT(0 && "Character not currently supported.");
7240  break;
7241  }
7242  }
7243 }
7244 
7245 void VmaJsonWriter::ContinueString(uint32_t n)
7246 {
7247  VMA_ASSERT(m_InsideString);
7248  m_SB.AddNumber(n);
7249 }
7250 
7251 void VmaJsonWriter::ContinueString(uint64_t n)
7252 {
7253  VMA_ASSERT(m_InsideString);
7254  m_SB.AddNumber(n);
7255 }
7256 
7257 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7258 {
7259  VMA_ASSERT(m_InsideString);
7260  m_SB.AddPointer(ptr);
7261 }
7262 
7263 void VmaJsonWriter::EndString(const char* pStr)
7264 {
7265  VMA_ASSERT(m_InsideString);
7266  if(pStr != VMA_NULL && pStr[0] != '\0')
7267  {
7268  ContinueString(pStr);
7269  }
7270  m_SB.Add('"');
7271  m_InsideString = false;
7272 }
7273 
7274 void VmaJsonWriter::WriteNumber(uint32_t n)
7275 {
7276  VMA_ASSERT(!m_InsideString);
7277  BeginValue(false);
7278  m_SB.AddNumber(n);
7279 }
7280 
7281 void VmaJsonWriter::WriteNumber(uint64_t n)
7282 {
7283  VMA_ASSERT(!m_InsideString);
7284  BeginValue(false);
7285  m_SB.AddNumber(n);
7286 }
7287 
7288 void VmaJsonWriter::WriteBool(bool b)
7289 {
7290  VMA_ASSERT(!m_InsideString);
7291  BeginValue(false);
7292  m_SB.Add(b ? "true" : "false");
7293 }
7294 
7295 void VmaJsonWriter::WriteNull()
7296 {
7297  VMA_ASSERT(!m_InsideString);
7298  BeginValue(false);
7299  m_SB.Add("null");
7300 }
7301 
7302 void VmaJsonWriter::BeginValue(bool isString)
7303 {
7304  if(!m_Stack.empty())
7305  {
7306  StackItem& currItem = m_Stack.back();
7307  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7308  currItem.valueCount % 2 == 0)
7309  {
7310  VMA_ASSERT(isString);
7311  }
7312 
7313  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7314  currItem.valueCount % 2 != 0)
7315  {
7316  m_SB.Add(": ");
7317  }
7318  else if(currItem.valueCount > 0)
7319  {
7320  m_SB.Add(", ");
7321  WriteIndent();
7322  }
7323  else
7324  {
7325  WriteIndent();
7326  }
7327  ++currItem.valueCount;
7328  }
7329 }
7330 
7331 void VmaJsonWriter::WriteIndent(bool oneLess)
7332 {
7333  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7334  {
7335  m_SB.AddNewLine();
7336 
7337  size_t count = m_Stack.size();
7338  if(count > 0 && oneLess)
7339  {
7340  --count;
7341  }
7342  for(size_t i = 0; i < count; ++i)
7343  {
7344  m_SB.Add(INDENT);
7345  }
7346  }
7347 }
7348 
7349 #endif // #if VMA_STATS_STRING_ENABLED
7350 
7352 
7353 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7354 {
7355  if(IsUserDataString())
7356  {
7357  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7358 
7359  FreeUserDataString(hAllocator);
7360 
7361  if(pUserData != VMA_NULL)
7362  {
7363  const char* const newStrSrc = (char*)pUserData;
7364  const size_t newStrLen = strlen(newStrSrc);
7365  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7366  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7367  m_pUserData = newStrDst;
7368  }
7369  }
7370  else
7371  {
7372  m_pUserData = pUserData;
7373  }
7374 }
7375 
7376 void VmaAllocation_T::ChangeBlockAllocation(
7377  VmaAllocator hAllocator,
7378  VmaDeviceMemoryBlock* block,
7379  VkDeviceSize offset)
7380 {
7381  VMA_ASSERT(block != VMA_NULL);
7382  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7383 
7384  // Move mapping reference counter from old block to new block.
7385  if(block != m_BlockAllocation.m_Block)
7386  {
7387  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7388  if(IsPersistentMap())
7389  ++mapRefCount;
7390  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7391  block->Map(hAllocator, mapRefCount, VMA_NULL);
7392  }
7393 
7394  m_BlockAllocation.m_Block = block;
7395  m_BlockAllocation.m_Offset = offset;
7396 }
7397 
7398 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7399 {
7400  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7401  m_BlockAllocation.m_Offset = newOffset;
7402 }
7403 
7404 VkDeviceSize VmaAllocation_T::GetOffset() const
7405 {
7406  switch(m_Type)
7407  {
7408  case ALLOCATION_TYPE_BLOCK:
7409  return m_BlockAllocation.m_Offset;
7410  case ALLOCATION_TYPE_DEDICATED:
7411  return 0;
7412  default:
7413  VMA_ASSERT(0);
7414  return 0;
7415  }
7416 }
7417 
7418 VkDeviceMemory VmaAllocation_T::GetMemory() const
7419 {
7420  switch(m_Type)
7421  {
7422  case ALLOCATION_TYPE_BLOCK:
7423  return m_BlockAllocation.m_Block->GetDeviceMemory();
7424  case ALLOCATION_TYPE_DEDICATED:
7425  return m_DedicatedAllocation.m_hMemory;
7426  default:
7427  VMA_ASSERT(0);
7428  return VK_NULL_HANDLE;
7429  }
7430 }
7431 
7432 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7433 {
7434  switch(m_Type)
7435  {
7436  case ALLOCATION_TYPE_BLOCK:
7437  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7438  case ALLOCATION_TYPE_DEDICATED:
7439  return m_DedicatedAllocation.m_MemoryTypeIndex;
7440  default:
7441  VMA_ASSERT(0);
7442  return UINT32_MAX;
7443  }
7444 }
7445 
7446 void* VmaAllocation_T::GetMappedData() const
7447 {
7448  switch(m_Type)
7449  {
7450  case ALLOCATION_TYPE_BLOCK:
7451  if(m_MapCount != 0)
7452  {
7453  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7454  VMA_ASSERT(pBlockData != VMA_NULL);
7455  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7456  }
7457  else
7458  {
7459  return VMA_NULL;
7460  }
7461  break;
7462  case ALLOCATION_TYPE_DEDICATED:
7463  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7464  return m_DedicatedAllocation.m_pMappedData;
7465  default:
7466  VMA_ASSERT(0);
7467  return VMA_NULL;
7468  }
7469 }
7470 
7471 bool VmaAllocation_T::CanBecomeLost() const
7472 {
7473  switch(m_Type)
7474  {
7475  case ALLOCATION_TYPE_BLOCK:
7476  return m_BlockAllocation.m_CanBecomeLost;
7477  case ALLOCATION_TYPE_DEDICATED:
7478  return false;
7479  default:
7480  VMA_ASSERT(0);
7481  return false;
7482  }
7483 }
7484 
7485 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7486 {
7487  VMA_ASSERT(CanBecomeLost());
7488 
7489  /*
7490  Warning: This is a carefully designed algorithm.
7491  Do not modify unless you really know what you're doing :)
7492  */
7493  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7494  for(;;)
7495  {
7496  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7497  {
7498  VMA_ASSERT(0);
7499  return false;
7500  }
7501  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7502  {
7503  return false;
7504  }
7505  else // Last use time earlier than current time.
7506  {
7507  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7508  {
7509  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7510  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7511  return true;
7512  }
7513  }
7514  }
7515 }
7516 
7517 #if VMA_STATS_STRING_ENABLED
7518 
7519 // Correspond to values of enum VmaSuballocationType.
7520 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7521  "FREE",
7522  "UNKNOWN",
7523  "BUFFER",
7524  "IMAGE_UNKNOWN",
7525  "IMAGE_LINEAR",
7526  "IMAGE_OPTIMAL",
7527 };
7528 
7529 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7530 {
7531  json.WriteString("Type");
7532  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7533 
7534  json.WriteString("Size");
7535  json.WriteNumber(m_Size);
7536 
7537  if(m_pUserData != VMA_NULL)
7538  {
7539  json.WriteString("UserData");
7540  if(IsUserDataString())
7541  {
7542  json.WriteString((const char*)m_pUserData);
7543  }
7544  else
7545  {
7546  json.BeginString();
7547  json.ContinueString_Pointer(m_pUserData);
7548  json.EndString();
7549  }
7550  }
7551 
7552  json.WriteString("CreationFrameIndex");
7553  json.WriteNumber(m_CreationFrameIndex);
7554 
7555  json.WriteString("LastUseFrameIndex");
7556  json.WriteNumber(GetLastUseFrameIndex());
7557 
7558  if(m_BufferImageUsage != 0)
7559  {
7560  json.WriteString("Usage");
7561  json.WriteNumber(m_BufferImageUsage);
7562  }
7563 }
7564 
7565 #endif
7566 
7567 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7568 {
7569  VMA_ASSERT(IsUserDataString());
7570  if(m_pUserData != VMA_NULL)
7571  {
7572  char* const oldStr = (char*)m_pUserData;
7573  const size_t oldStrLen = strlen(oldStr);
7574  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7575  m_pUserData = VMA_NULL;
7576  }
7577 }
7578 
7579 void VmaAllocation_T::BlockAllocMap()
7580 {
7581  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7582 
7583  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7584  {
7585  ++m_MapCount;
7586  }
7587  else
7588  {
7589  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7590  }
7591 }
7592 
7593 void VmaAllocation_T::BlockAllocUnmap()
7594 {
7595  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7596 
7597  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7598  {
7599  --m_MapCount;
7600  }
7601  else
7602  {
7603  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7604  }
7605 }
7606 
7607 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7608 {
7609  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7610 
7611  if(m_MapCount != 0)
7612  {
7613  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7614  {
7615  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7616  *ppData = m_DedicatedAllocation.m_pMappedData;
7617  ++m_MapCount;
7618  return VK_SUCCESS;
7619  }
7620  else
7621  {
7622  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7623  return VK_ERROR_MEMORY_MAP_FAILED;
7624  }
7625  }
7626  else
7627  {
7628  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7629  hAllocator->m_hDevice,
7630  m_DedicatedAllocation.m_hMemory,
7631  0, // offset
7632  VK_WHOLE_SIZE,
7633  0, // flags
7634  ppData);
7635  if(result == VK_SUCCESS)
7636  {
7637  m_DedicatedAllocation.m_pMappedData = *ppData;
7638  m_MapCount = 1;
7639  }
7640  return result;
7641  }
7642 }
7643 
7644 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7645 {
7646  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7647 
7648  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7649  {
7650  --m_MapCount;
7651  if(m_MapCount == 0)
7652  {
7653  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7654  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7655  hAllocator->m_hDevice,
7656  m_DedicatedAllocation.m_hMemory);
7657  }
7658  }
7659  else
7660  {
7661  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7662  }
7663 }
7664 
7665 #if VMA_STATS_STRING_ENABLED
7666 
7667 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7668 {
7669  json.BeginObject();
7670 
7671  json.WriteString("Blocks");
7672  json.WriteNumber(stat.blockCount);
7673 
7674  json.WriteString("Allocations");
7675  json.WriteNumber(stat.allocationCount);
7676 
7677  json.WriteString("UnusedRanges");
7678  json.WriteNumber(stat.unusedRangeCount);
7679 
7680  json.WriteString("UsedBytes");
7681  json.WriteNumber(stat.usedBytes);
7682 
7683  json.WriteString("UnusedBytes");
7684  json.WriteNumber(stat.unusedBytes);
7685 
7686  if(stat.allocationCount > 1)
7687  {
7688  json.WriteString("AllocationSize");
7689  json.BeginObject(true);
7690  json.WriteString("Min");
7691  json.WriteNumber(stat.allocationSizeMin);
7692  json.WriteString("Avg");
7693  json.WriteNumber(stat.allocationSizeAvg);
7694  json.WriteString("Max");
7695  json.WriteNumber(stat.allocationSizeMax);
7696  json.EndObject();
7697  }
7698 
7699  if(stat.unusedRangeCount > 1)
7700  {
7701  json.WriteString("UnusedRangeSize");
7702  json.BeginObject(true);
7703  json.WriteString("Min");
7704  json.WriteNumber(stat.unusedRangeSizeMin);
7705  json.WriteString("Avg");
7706  json.WriteNumber(stat.unusedRangeSizeAvg);
7707  json.WriteString("Max");
7708  json.WriteNumber(stat.unusedRangeSizeMax);
7709  json.EndObject();
7710  }
7711 
7712  json.EndObject();
7713 }
7714 
7715 #endif // #if VMA_STATS_STRING_ENABLED
7716 
7717 struct VmaSuballocationItemSizeLess
7718 {
7719  bool operator()(
7720  const VmaSuballocationList::iterator lhs,
7721  const VmaSuballocationList::iterator rhs) const
7722  {
7723  return lhs->size < rhs->size;
7724  }
7725  bool operator()(
7726  const VmaSuballocationList::iterator lhs,
7727  VkDeviceSize rhsSize) const
7728  {
7729  return lhs->size < rhsSize;
7730  }
7731 };
7732 
7733 
7735 // class VmaBlockMetadata
7736 
7737 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7738  m_Size(0),
7739  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7740 {
7741 }
7742 
7743 #if VMA_STATS_STRING_ENABLED
7744 
7745 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7746  VkDeviceSize unusedBytes,
7747  size_t allocationCount,
7748  size_t unusedRangeCount) const
7749 {
7750  json.BeginObject();
7751 
7752  json.WriteString("TotalBytes");
7753  json.WriteNumber(GetSize());
7754 
7755  json.WriteString("UnusedBytes");
7756  json.WriteNumber(unusedBytes);
7757 
7758  json.WriteString("Allocations");
7759  json.WriteNumber((uint64_t)allocationCount);
7760 
7761  json.WriteString("UnusedRanges");
7762  json.WriteNumber((uint64_t)unusedRangeCount);
7763 
7764  json.WriteString("Suballocations");
7765  json.BeginArray();
7766 }
7767 
7768 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7769  VkDeviceSize offset,
7770  VmaAllocation hAllocation) const
7771 {
7772  json.BeginObject(true);
7773 
7774  json.WriteString("Offset");
7775  json.WriteNumber(offset);
7776 
7777  hAllocation->PrintParameters(json);
7778 
7779  json.EndObject();
7780 }
7781 
7782 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7783  VkDeviceSize offset,
7784  VkDeviceSize size) const
7785 {
7786  json.BeginObject(true);
7787 
7788  json.WriteString("Offset");
7789  json.WriteNumber(offset);
7790 
7791  json.WriteString("Type");
7792  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7793 
7794  json.WriteString("Size");
7795  json.WriteNumber(size);
7796 
7797  json.EndObject();
7798 }
7799 
7800 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7801 {
7802  json.EndArray();
7803  json.EndObject();
7804 }
7805 
7806 #endif // #if VMA_STATS_STRING_ENABLED
7807 
7809 // class VmaBlockMetadata_Generic
7810 
7811 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7812  VmaBlockMetadata(hAllocator),
7813  m_FreeCount(0),
7814  m_SumFreeSize(0),
7815  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7816  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7817 {
7818 }
7819 
7820 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7821 {
7822 }
7823 
7824 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7825 {
7826  VmaBlockMetadata::Init(size);
7827 
7828  m_FreeCount = 1;
7829  m_SumFreeSize = size;
7830 
7831  VmaSuballocation suballoc = {};
7832  suballoc.offset = 0;
7833  suballoc.size = size;
7834  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7835  suballoc.hAllocation = VK_NULL_HANDLE;
7836 
7837  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7838  m_Suballocations.push_back(suballoc);
7839  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7840  --suballocItem;
7841  m_FreeSuballocationsBySize.push_back(suballocItem);
7842 }
7843 
7844 bool VmaBlockMetadata_Generic::Validate() const
7845 {
7846  VMA_VALIDATE(!m_Suballocations.empty());
7847 
7848  // Expected offset of new suballocation as calculated from previous ones.
7849  VkDeviceSize calculatedOffset = 0;
7850  // Expected number of free suballocations as calculated from traversing their list.
7851  uint32_t calculatedFreeCount = 0;
7852  // Expected sum size of free suballocations as calculated from traversing their list.
7853  VkDeviceSize calculatedSumFreeSize = 0;
7854  // Expected number of free suballocations that should be registered in
7855  // m_FreeSuballocationsBySize calculated from traversing their list.
7856  size_t freeSuballocationsToRegister = 0;
7857  // True if previous visited suballocation was free.
7858  bool prevFree = false;
7859 
7860  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7861  suballocItem != m_Suballocations.cend();
7862  ++suballocItem)
7863  {
7864  const VmaSuballocation& subAlloc = *suballocItem;
7865 
7866  // Actual offset of this suballocation doesn't match expected one.
7867  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7868 
7869  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7870  // Two adjacent free suballocations are invalid. They should be merged.
7871  VMA_VALIDATE(!prevFree || !currFree);
7872 
7873  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7874 
7875  if(currFree)
7876  {
7877  calculatedSumFreeSize += subAlloc.size;
7878  ++calculatedFreeCount;
7879  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7880  {
7881  ++freeSuballocationsToRegister;
7882  }
7883 
7884  // Margin required between allocations - every free space must be at least that large.
7885  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7886  }
7887  else
7888  {
7889  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7890  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7891 
7892  // Margin required between allocations - previous allocation must be free.
7893  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7894  }
7895 
7896  calculatedOffset += subAlloc.size;
7897  prevFree = currFree;
7898  }
7899 
7900  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7901  // match expected one.
7902  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7903 
7904  VkDeviceSize lastSize = 0;
7905  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7906  {
7907  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7908 
7909  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7910  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7911  // They must be sorted by size ascending.
7912  VMA_VALIDATE(suballocItem->size >= lastSize);
7913 
7914  lastSize = suballocItem->size;
7915  }
7916 
7917  // Check if totals match calculacted values.
7918  VMA_VALIDATE(ValidateFreeSuballocationList());
7919  VMA_VALIDATE(calculatedOffset == GetSize());
7920  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7921  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7922 
7923  return true;
7924 }
7925 
7926 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7927 {
7928  if(!m_FreeSuballocationsBySize.empty())
7929  {
7930  return m_FreeSuballocationsBySize.back()->size;
7931  }
7932  else
7933  {
7934  return 0;
7935  }
7936 }
7937 
7938 bool VmaBlockMetadata_Generic::IsEmpty() const
7939 {
7940  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7941 }
7942 
7943 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7944 {
7945  outInfo.blockCount = 1;
7946 
7947  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7948  outInfo.allocationCount = rangeCount - m_FreeCount;
7949  outInfo.unusedRangeCount = m_FreeCount;
7950 
7951  outInfo.unusedBytes = m_SumFreeSize;
7952  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7953 
7954  outInfo.allocationSizeMin = UINT64_MAX;
7955  outInfo.allocationSizeMax = 0;
7956  outInfo.unusedRangeSizeMin = UINT64_MAX;
7957  outInfo.unusedRangeSizeMax = 0;
7958 
7959  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7960  suballocItem != m_Suballocations.cend();
7961  ++suballocItem)
7962  {
7963  const VmaSuballocation& suballoc = *suballocItem;
7964  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7965  {
7966  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7967  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7968  }
7969  else
7970  {
7971  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7972  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7973  }
7974  }
7975 }
7976 
7977 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7978 {
7979  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7980 
7981  inoutStats.size += GetSize();
7982  inoutStats.unusedSize += m_SumFreeSize;
7983  inoutStats.allocationCount += rangeCount - m_FreeCount;
7984  inoutStats.unusedRangeCount += m_FreeCount;
7985  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7986 }
7987 
7988 #if VMA_STATS_STRING_ENABLED
7989 
7990 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7991 {
7992  PrintDetailedMap_Begin(json,
7993  m_SumFreeSize, // unusedBytes
7994  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7995  m_FreeCount); // unusedRangeCount
7996 
7997  size_t i = 0;
7998  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7999  suballocItem != m_Suballocations.cend();
8000  ++suballocItem, ++i)
8001  {
8002  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8003  {
8004  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8005  }
8006  else
8007  {
8008  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8009  }
8010  }
8011 
8012  PrintDetailedMap_End(json);
8013 }
8014 
8015 #endif // #if VMA_STATS_STRING_ENABLED
8016 
8017 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8018  uint32_t currentFrameIndex,
8019  uint32_t frameInUseCount,
8020  VkDeviceSize bufferImageGranularity,
8021  VkDeviceSize allocSize,
8022  VkDeviceSize allocAlignment,
8023  bool upperAddress,
8024  VmaSuballocationType allocType,
8025  bool canMakeOtherLost,
8026  uint32_t strategy,
8027  VmaAllocationRequest* pAllocationRequest)
8028 {
8029  VMA_ASSERT(allocSize > 0);
8030  VMA_ASSERT(!upperAddress);
8031  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8032  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8033  VMA_HEAVY_ASSERT(Validate());
8034 
8035  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8036 
8037  // There is not enough total free space in this block to fullfill the request: Early return.
8038  if(canMakeOtherLost == false &&
8039  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8040  {
8041  return false;
8042  }
8043 
8044  // New algorithm, efficiently searching freeSuballocationsBySize.
8045  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8046  if(freeSuballocCount > 0)
8047  {
8049  {
8050  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8051  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8052  m_FreeSuballocationsBySize.data(),
8053  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8054  allocSize + 2 * VMA_DEBUG_MARGIN,
8055  VmaSuballocationItemSizeLess());
8056  size_t index = it - m_FreeSuballocationsBySize.data();
8057  for(; index < freeSuballocCount; ++index)
8058  {
8059  if(CheckAllocation(
8060  currentFrameIndex,
8061  frameInUseCount,
8062  bufferImageGranularity,
8063  allocSize,
8064  allocAlignment,
8065  allocType,
8066  m_FreeSuballocationsBySize[index],
8067  false, // canMakeOtherLost
8068  &pAllocationRequest->offset,
8069  &pAllocationRequest->itemsToMakeLostCount,
8070  &pAllocationRequest->sumFreeSize,
8071  &pAllocationRequest->sumItemSize))
8072  {
8073  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8074  return true;
8075  }
8076  }
8077  }
8078  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8079  {
8080  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8081  it != m_Suballocations.end();
8082  ++it)
8083  {
8084  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8085  currentFrameIndex,
8086  frameInUseCount,
8087  bufferImageGranularity,
8088  allocSize,
8089  allocAlignment,
8090  allocType,
8091  it,
8092  false, // canMakeOtherLost
8093  &pAllocationRequest->offset,
8094  &pAllocationRequest->itemsToMakeLostCount,
8095  &pAllocationRequest->sumFreeSize,
8096  &pAllocationRequest->sumItemSize))
8097  {
8098  pAllocationRequest->item = it;
8099  return true;
8100  }
8101  }
8102  }
8103  else // WORST_FIT, FIRST_FIT
8104  {
8105  // Search staring from biggest suballocations.
8106  for(size_t index = freeSuballocCount; index--; )
8107  {
8108  if(CheckAllocation(
8109  currentFrameIndex,
8110  frameInUseCount,
8111  bufferImageGranularity,
8112  allocSize,
8113  allocAlignment,
8114  allocType,
8115  m_FreeSuballocationsBySize[index],
8116  false, // canMakeOtherLost
8117  &pAllocationRequest->offset,
8118  &pAllocationRequest->itemsToMakeLostCount,
8119  &pAllocationRequest->sumFreeSize,
8120  &pAllocationRequest->sumItemSize))
8121  {
8122  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8123  return true;
8124  }
8125  }
8126  }
8127  }
8128 
8129  if(canMakeOtherLost)
8130  {
8131  // Brute-force algorithm. TODO: Come up with something better.
8132 
8133  bool found = false;
8134  VmaAllocationRequest tmpAllocRequest = {};
8135  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8136  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8137  suballocIt != m_Suballocations.end();
8138  ++suballocIt)
8139  {
8140  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8141  suballocIt->hAllocation->CanBecomeLost())
8142  {
8143  if(CheckAllocation(
8144  currentFrameIndex,
8145  frameInUseCount,
8146  bufferImageGranularity,
8147  allocSize,
8148  allocAlignment,
8149  allocType,
8150  suballocIt,
8151  canMakeOtherLost,
8152  &tmpAllocRequest.offset,
8153  &tmpAllocRequest.itemsToMakeLostCount,
8154  &tmpAllocRequest.sumFreeSize,
8155  &tmpAllocRequest.sumItemSize))
8156  {
8158  {
8159  *pAllocationRequest = tmpAllocRequest;
8160  pAllocationRequest->item = suballocIt;
8161  break;
8162  }
8163  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8164  {
8165  *pAllocationRequest = tmpAllocRequest;
8166  pAllocationRequest->item = suballocIt;
8167  found = true;
8168  }
8169  }
8170  }
8171  }
8172 
8173  return found;
8174  }
8175 
8176  return false;
8177 }
8178 
8179 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8180  uint32_t currentFrameIndex,
8181  uint32_t frameInUseCount,
8182  VmaAllocationRequest* pAllocationRequest)
8183 {
8184  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8185 
8186  while(pAllocationRequest->itemsToMakeLostCount > 0)
8187  {
8188  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8189  {
8190  ++pAllocationRequest->item;
8191  }
8192  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8193  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8194  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8195  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8196  {
8197  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8198  --pAllocationRequest->itemsToMakeLostCount;
8199  }
8200  else
8201  {
8202  return false;
8203  }
8204  }
8205 
8206  VMA_HEAVY_ASSERT(Validate());
8207  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8208  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8209 
8210  return true;
8211 }
8212 
8213 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8214 {
8215  uint32_t lostAllocationCount = 0;
8216  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8217  it != m_Suballocations.end();
8218  ++it)
8219  {
8220  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8221  it->hAllocation->CanBecomeLost() &&
8222  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8223  {
8224  it = FreeSuballocation(it);
8225  ++lostAllocationCount;
8226  }
8227  }
8228  return lostAllocationCount;
8229 }
8230 
8231 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8232 {
8233  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8234  it != m_Suballocations.end();
8235  ++it)
8236  {
8237  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8238  {
8239  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8240  {
8241  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8242  return VK_ERROR_VALIDATION_FAILED_EXT;
8243  }
8244  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8245  {
8246  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8247  return VK_ERROR_VALIDATION_FAILED_EXT;
8248  }
8249  }
8250  }
8251 
8252  return VK_SUCCESS;
8253 }
8254 
8255 void VmaBlockMetadata_Generic::Alloc(
8256  const VmaAllocationRequest& request,
8257  VmaSuballocationType type,
8258  VkDeviceSize allocSize,
8259  VmaAllocation hAllocation)
8260 {
8261  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8262  VMA_ASSERT(request.item != m_Suballocations.end());
8263  VmaSuballocation& suballoc = *request.item;
8264  // Given suballocation is a free block.
8265  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8266  // Given offset is inside this suballocation.
8267  VMA_ASSERT(request.offset >= suballoc.offset);
8268  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8269  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8270  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8271 
8272  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8273  // it to become used.
8274  UnregisterFreeSuballocation(request.item);
8275 
8276  suballoc.offset = request.offset;
8277  suballoc.size = allocSize;
8278  suballoc.type = type;
8279  suballoc.hAllocation = hAllocation;
8280 
8281  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8282  if(paddingEnd)
8283  {
8284  VmaSuballocation paddingSuballoc = {};
8285  paddingSuballoc.offset = request.offset + allocSize;
8286  paddingSuballoc.size = paddingEnd;
8287  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8288  VmaSuballocationList::iterator next = request.item;
8289  ++next;
8290  const VmaSuballocationList::iterator paddingEndItem =
8291  m_Suballocations.insert(next, paddingSuballoc);
8292  RegisterFreeSuballocation(paddingEndItem);
8293  }
8294 
8295  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8296  if(paddingBegin)
8297  {
8298  VmaSuballocation paddingSuballoc = {};
8299  paddingSuballoc.offset = request.offset - paddingBegin;
8300  paddingSuballoc.size = paddingBegin;
8301  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8302  const VmaSuballocationList::iterator paddingBeginItem =
8303  m_Suballocations.insert(request.item, paddingSuballoc);
8304  RegisterFreeSuballocation(paddingBeginItem);
8305  }
8306 
8307  // Update totals.
8308  m_FreeCount = m_FreeCount - 1;
8309  if(paddingBegin > 0)
8310  {
8311  ++m_FreeCount;
8312  }
8313  if(paddingEnd > 0)
8314  {
8315  ++m_FreeCount;
8316  }
8317  m_SumFreeSize -= allocSize;
8318 }
8319 
8320 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8321 {
8322  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8323  suballocItem != m_Suballocations.end();
8324  ++suballocItem)
8325  {
8326  VmaSuballocation& suballoc = *suballocItem;
8327  if(suballoc.hAllocation == allocation)
8328  {
8329  FreeSuballocation(suballocItem);
8330  VMA_HEAVY_ASSERT(Validate());
8331  return;
8332  }
8333  }
8334  VMA_ASSERT(0 && "Not found!");
8335 }
8336 
8337 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8338 {
8339  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8340  suballocItem != m_Suballocations.end();
8341  ++suballocItem)
8342  {
8343  VmaSuballocation& suballoc = *suballocItem;
8344  if(suballoc.offset == offset)
8345  {
8346  FreeSuballocation(suballocItem);
8347  return;
8348  }
8349  }
8350  VMA_ASSERT(0 && "Not found!");
8351 }
8352 
8353 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8354 {
8355  VkDeviceSize lastSize = 0;
8356  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8357  {
8358  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8359 
8360  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8361  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8362  VMA_VALIDATE(it->size >= lastSize);
8363  lastSize = it->size;
8364  }
8365  return true;
8366 }
8367 
8368 bool VmaBlockMetadata_Generic::CheckAllocation(
8369  uint32_t currentFrameIndex,
8370  uint32_t frameInUseCount,
8371  VkDeviceSize bufferImageGranularity,
8372  VkDeviceSize allocSize,
8373  VkDeviceSize allocAlignment,
8374  VmaSuballocationType allocType,
8375  VmaSuballocationList::const_iterator suballocItem,
8376  bool canMakeOtherLost,
8377  VkDeviceSize* pOffset,
8378  size_t* itemsToMakeLostCount,
8379  VkDeviceSize* pSumFreeSize,
8380  VkDeviceSize* pSumItemSize) const
8381 {
8382  VMA_ASSERT(allocSize > 0);
8383  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8384  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8385  VMA_ASSERT(pOffset != VMA_NULL);
8386 
8387  *itemsToMakeLostCount = 0;
8388  *pSumFreeSize = 0;
8389  *pSumItemSize = 0;
8390 
8391  if(canMakeOtherLost)
8392  {
8393  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8394  {
8395  *pSumFreeSize = suballocItem->size;
8396  }
8397  else
8398  {
8399  if(suballocItem->hAllocation->CanBecomeLost() &&
8400  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8401  {
8402  ++*itemsToMakeLostCount;
8403  *pSumItemSize = suballocItem->size;
8404  }
8405  else
8406  {
8407  return false;
8408  }
8409  }
8410 
8411  // Remaining size is too small for this request: Early return.
8412  if(GetSize() - suballocItem->offset < allocSize)
8413  {
8414  return false;
8415  }
8416 
8417  // Start from offset equal to beginning of this suballocation.
8418  *pOffset = suballocItem->offset;
8419 
8420  // Apply VMA_DEBUG_MARGIN at the beginning.
8421  if(VMA_DEBUG_MARGIN > 0)
8422  {
8423  *pOffset += VMA_DEBUG_MARGIN;
8424  }
8425 
8426  // Apply alignment.
8427  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8428 
8429  // Check previous suballocations for BufferImageGranularity conflicts.
8430  // Make bigger alignment if necessary.
8431  if(bufferImageGranularity > 1)
8432  {
8433  bool bufferImageGranularityConflict = false;
8434  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8435  while(prevSuballocItem != m_Suballocations.cbegin())
8436  {
8437  --prevSuballocItem;
8438  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8439  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8440  {
8441  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8442  {
8443  bufferImageGranularityConflict = true;
8444  break;
8445  }
8446  }
8447  else
8448  // Already on previous page.
8449  break;
8450  }
8451  if(bufferImageGranularityConflict)
8452  {
8453  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8454  }
8455  }
8456 
8457  // Now that we have final *pOffset, check if we are past suballocItem.
8458  // If yes, return false - this function should be called for another suballocItem as starting point.
8459  if(*pOffset >= suballocItem->offset + suballocItem->size)
8460  {
8461  return false;
8462  }
8463 
8464  // Calculate padding at the beginning based on current offset.
8465  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8466 
8467  // Calculate required margin at the end.
8468  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8469 
8470  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8471  // Another early return check.
8472  if(suballocItem->offset + totalSize > GetSize())
8473  {
8474  return false;
8475  }
8476 
8477  // Advance lastSuballocItem until desired size is reached.
8478  // Update itemsToMakeLostCount.
8479  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8480  if(totalSize > suballocItem->size)
8481  {
8482  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8483  while(remainingSize > 0)
8484  {
8485  ++lastSuballocItem;
8486  if(lastSuballocItem == m_Suballocations.cend())
8487  {
8488  return false;
8489  }
8490  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8491  {
8492  *pSumFreeSize += lastSuballocItem->size;
8493  }
8494  else
8495  {
8496  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8497  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8498  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8499  {
8500  ++*itemsToMakeLostCount;
8501  *pSumItemSize += lastSuballocItem->size;
8502  }
8503  else
8504  {
8505  return false;
8506  }
8507  }
8508  remainingSize = (lastSuballocItem->size < remainingSize) ?
8509  remainingSize - lastSuballocItem->size : 0;
8510  }
8511  }
8512 
8513  // Check next suballocations for BufferImageGranularity conflicts.
8514  // If conflict exists, we must mark more allocations lost or fail.
8515  if(bufferImageGranularity > 1)
8516  {
8517  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8518  ++nextSuballocItem;
8519  while(nextSuballocItem != m_Suballocations.cend())
8520  {
8521  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8522  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8523  {
8524  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8525  {
8526  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8527  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8528  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8529  {
8530  ++*itemsToMakeLostCount;
8531  }
8532  else
8533  {
8534  return false;
8535  }
8536  }
8537  }
8538  else
8539  {
8540  // Already on next page.
8541  break;
8542  }
8543  ++nextSuballocItem;
8544  }
8545  }
8546  }
8547  else
8548  {
8549  const VmaSuballocation& suballoc = *suballocItem;
8550  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8551 
8552  *pSumFreeSize = suballoc.size;
8553 
8554  // Size of this suballocation is too small for this request: Early return.
8555  if(suballoc.size < allocSize)
8556  {
8557  return false;
8558  }
8559 
8560  // Start from offset equal to beginning of this suballocation.
8561  *pOffset = suballoc.offset;
8562 
8563  // Apply VMA_DEBUG_MARGIN at the beginning.
8564  if(VMA_DEBUG_MARGIN > 0)
8565  {
8566  *pOffset += VMA_DEBUG_MARGIN;
8567  }
8568 
8569  // Apply alignment.
8570  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8571 
8572  // Check previous suballocations for BufferImageGranularity conflicts.
8573  // Make bigger alignment if necessary.
8574  if(bufferImageGranularity > 1)
8575  {
8576  bool bufferImageGranularityConflict = false;
8577  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8578  while(prevSuballocItem != m_Suballocations.cbegin())
8579  {
8580  --prevSuballocItem;
8581  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8582  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8583  {
8584  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8585  {
8586  bufferImageGranularityConflict = true;
8587  break;
8588  }
8589  }
8590  else
8591  // Already on previous page.
8592  break;
8593  }
8594  if(bufferImageGranularityConflict)
8595  {
8596  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8597  }
8598  }
8599 
8600  // Calculate padding at the beginning based on current offset.
8601  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8602 
8603  // Calculate required margin at the end.
8604  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8605 
8606  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8607  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8608  {
8609  return false;
8610  }
8611 
8612  // Check next suballocations for BufferImageGranularity conflicts.
8613  // If conflict exists, allocation cannot be made here.
8614  if(bufferImageGranularity > 1)
8615  {
8616  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8617  ++nextSuballocItem;
8618  while(nextSuballocItem != m_Suballocations.cend())
8619  {
8620  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8621  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8622  {
8623  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8624  {
8625  return false;
8626  }
8627  }
8628  else
8629  {
8630  // Already on next page.
8631  break;
8632  }
8633  ++nextSuballocItem;
8634  }
8635  }
8636  }
8637 
8638  // All tests passed: Success. pOffset is already filled.
8639  return true;
8640 }
8641 
8642 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8643 {
8644  VMA_ASSERT(item != m_Suballocations.end());
8645  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8646 
8647  VmaSuballocationList::iterator nextItem = item;
8648  ++nextItem;
8649  VMA_ASSERT(nextItem != m_Suballocations.end());
8650  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8651 
8652  item->size += nextItem->size;
8653  --m_FreeCount;
8654  m_Suballocations.erase(nextItem);
8655 }
8656 
8657 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8658 {
8659  // Change this suballocation to be marked as free.
8660  VmaSuballocation& suballoc = *suballocItem;
8661  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8662  suballoc.hAllocation = VK_NULL_HANDLE;
8663 
8664  // Update totals.
8665  ++m_FreeCount;
8666  m_SumFreeSize += suballoc.size;
8667 
8668  // Merge with previous and/or next suballocation if it's also free.
8669  bool mergeWithNext = false;
8670  bool mergeWithPrev = false;
8671 
8672  VmaSuballocationList::iterator nextItem = suballocItem;
8673  ++nextItem;
8674  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8675  {
8676  mergeWithNext = true;
8677  }
8678 
8679  VmaSuballocationList::iterator prevItem = suballocItem;
8680  if(suballocItem != m_Suballocations.begin())
8681  {
8682  --prevItem;
8683  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8684  {
8685  mergeWithPrev = true;
8686  }
8687  }
8688 
8689  if(mergeWithNext)
8690  {
8691  UnregisterFreeSuballocation(nextItem);
8692  MergeFreeWithNext(suballocItem);
8693  }
8694 
8695  if(mergeWithPrev)
8696  {
8697  UnregisterFreeSuballocation(prevItem);
8698  MergeFreeWithNext(prevItem);
8699  RegisterFreeSuballocation(prevItem);
8700  return prevItem;
8701  }
8702  else
8703  {
8704  RegisterFreeSuballocation(suballocItem);
8705  return suballocItem;
8706  }
8707 }
8708 
8709 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8710 {
8711  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8712  VMA_ASSERT(item->size > 0);
8713 
8714  // You may want to enable this validation at the beginning or at the end of
8715  // this function, depending on what do you want to check.
8716  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8717 
8718  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8719  {
8720  if(m_FreeSuballocationsBySize.empty())
8721  {
8722  m_FreeSuballocationsBySize.push_back(item);
8723  }
8724  else
8725  {
8726  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8727  }
8728  }
8729 
8730  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8731 }
8732 
8733 
8734 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8735 {
8736  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8737  VMA_ASSERT(item->size > 0);
8738 
8739  // You may want to enable this validation at the beginning or at the end of
8740  // this function, depending on what do you want to check.
8741  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8742 
8743  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8744  {
8745  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8746  m_FreeSuballocationsBySize.data(),
8747  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8748  item,
8749  VmaSuballocationItemSizeLess());
8750  for(size_t index = it - m_FreeSuballocationsBySize.data();
8751  index < m_FreeSuballocationsBySize.size();
8752  ++index)
8753  {
8754  if(m_FreeSuballocationsBySize[index] == item)
8755  {
8756  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8757  return;
8758  }
8759  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8760  }
8761  VMA_ASSERT(0 && "Not found.");
8762  }
8763 
8764  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8765 }
8766 
8767 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8768  VkDeviceSize bufferImageGranularity,
8769  VmaSuballocationType& inOutPrevSuballocType) const
8770 {
8771  if(bufferImageGranularity == 1 || IsEmpty())
8772  {
8773  return false;
8774  }
8775 
8776  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8777  bool typeConflictFound = false;
8778  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8779  it != m_Suballocations.cend();
8780  ++it)
8781  {
8782  const VmaSuballocationType suballocType = it->type;
8783  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8784  {
8785  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8786  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8787  {
8788  typeConflictFound = true;
8789  }
8790  inOutPrevSuballocType = suballocType;
8791  }
8792  }
8793 
8794  return typeConflictFound || minAlignment >= bufferImageGranularity;
8795 }
8796 
8798 // class VmaBlockMetadata_Linear
8799 
8800 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8801  VmaBlockMetadata(hAllocator),
8802  m_SumFreeSize(0),
8803  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8804  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8805  m_1stVectorIndex(0),
8806  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8807  m_1stNullItemsBeginCount(0),
8808  m_1stNullItemsMiddleCount(0),
8809  m_2ndNullItemsCount(0)
8810 {
8811 }
8812 
8813 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8814 {
8815 }
8816 
8817 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8818 {
8819  VmaBlockMetadata::Init(size);
8820  m_SumFreeSize = size;
8821 }
8822 
8823 bool VmaBlockMetadata_Linear::Validate() const
8824 {
8825  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8826  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8827 
8828  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8829  VMA_VALIDATE(!suballocations1st.empty() ||
8830  suballocations2nd.empty() ||
8831  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8832 
8833  if(!suballocations1st.empty())
8834  {
8835  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
8836  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8837  // Null item at the end should be just pop_back().
8838  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8839  }
8840  if(!suballocations2nd.empty())
8841  {
8842  // Null item at the end should be just pop_back().
8843  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8844  }
8845 
8846  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8847  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8848 
8849  VkDeviceSize sumUsedSize = 0;
8850  const size_t suballoc1stCount = suballocations1st.size();
8851  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8852 
8853  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8854  {
8855  const size_t suballoc2ndCount = suballocations2nd.size();
8856  size_t nullItem2ndCount = 0;
8857  for(size_t i = 0; i < suballoc2ndCount; ++i)
8858  {
8859  const VmaSuballocation& suballoc = suballocations2nd[i];
8860  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8861 
8862  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8863  VMA_VALIDATE(suballoc.offset >= offset);
8864 
8865  if(!currFree)
8866  {
8867  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8868  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8869  sumUsedSize += suballoc.size;
8870  }
8871  else
8872  {
8873  ++nullItem2ndCount;
8874  }
8875 
8876  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8877  }
8878 
8879  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8880  }
8881 
8882  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8883  {
8884  const VmaSuballocation& suballoc = suballocations1st[i];
8885  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8886  suballoc.hAllocation == VK_NULL_HANDLE);
8887  }
8888 
8889  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8890 
8891  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8892  {
8893  const VmaSuballocation& suballoc = suballocations1st[i];
8894  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8895 
8896  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8897  VMA_VALIDATE(suballoc.offset >= offset);
8898  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8899 
8900  if(!currFree)
8901  {
8902  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8903  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8904  sumUsedSize += suballoc.size;
8905  }
8906  else
8907  {
8908  ++nullItem1stCount;
8909  }
8910 
8911  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8912  }
8913  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8914 
8915  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8916  {
8917  const size_t suballoc2ndCount = suballocations2nd.size();
8918  size_t nullItem2ndCount = 0;
8919  for(size_t i = suballoc2ndCount; i--; )
8920  {
8921  const VmaSuballocation& suballoc = suballocations2nd[i];
8922  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8923 
8924  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8925  VMA_VALIDATE(suballoc.offset >= offset);
8926 
8927  if(!currFree)
8928  {
8929  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8930  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8931  sumUsedSize += suballoc.size;
8932  }
8933  else
8934  {
8935  ++nullItem2ndCount;
8936  }
8937 
8938  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8939  }
8940 
8941  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8942  }
8943 
8944  VMA_VALIDATE(offset <= GetSize());
8945  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8946 
8947  return true;
8948 }
8949 
8950 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8951 {
8952  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8953  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8954 }
8955 
8956 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8957 {
8958  const VkDeviceSize size = GetSize();
8959 
8960  /*
8961  We don't consider gaps inside allocation vectors with freed allocations because
8962  they are not suitable for reuse in linear allocator. We consider only space that
8963  is available for new allocations.
8964  */
8965  if(IsEmpty())
8966  {
8967  return size;
8968  }
8969 
8970  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8971 
8972  switch(m_2ndVectorMode)
8973  {
8974  case SECOND_VECTOR_EMPTY:
8975  /*
8976  Available space is after end of 1st, as well as before beginning of 1st (which
8977  whould make it a ring buffer).
8978  */
8979  {
8980  const size_t suballocations1stCount = suballocations1st.size();
8981  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8982  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8983  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8984  return VMA_MAX(
8985  firstSuballoc.offset,
8986  size - (lastSuballoc.offset + lastSuballoc.size));
8987  }
8988  break;
8989 
8990  case SECOND_VECTOR_RING_BUFFER:
8991  /*
8992  Available space is only between end of 2nd and beginning of 1st.
8993  */
8994  {
8995  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8996  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8997  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8998  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8999  }
9000  break;
9001 
9002  case SECOND_VECTOR_DOUBLE_STACK:
9003  /*
9004  Available space is only between end of 1st and top of 2nd.
9005  */
9006  {
9007  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9008  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9009  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9010  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9011  }
9012  break;
9013 
9014  default:
9015  VMA_ASSERT(0);
9016  return 0;
9017  }
9018 }
9019 
9020 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9021 {
9022  const VkDeviceSize size = GetSize();
9023  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9024  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9025  const size_t suballoc1stCount = suballocations1st.size();
9026  const size_t suballoc2ndCount = suballocations2nd.size();
9027 
9028  outInfo.blockCount = 1;
9029  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9030  outInfo.unusedRangeCount = 0;
9031  outInfo.usedBytes = 0;
9032  outInfo.allocationSizeMin = UINT64_MAX;
9033  outInfo.allocationSizeMax = 0;
9034  outInfo.unusedRangeSizeMin = UINT64_MAX;
9035  outInfo.unusedRangeSizeMax = 0;
9036 
9037  VkDeviceSize lastOffset = 0;
9038 
9039  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9040  {
9041  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9042  size_t nextAlloc2ndIndex = 0;
9043  while(lastOffset < freeSpace2ndTo1stEnd)
9044  {
9045  // Find next non-null allocation or move nextAllocIndex to the end.
9046  while(nextAlloc2ndIndex < suballoc2ndCount &&
9047  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9048  {
9049  ++nextAlloc2ndIndex;
9050  }
9051 
9052  // Found non-null allocation.
9053  if(nextAlloc2ndIndex < suballoc2ndCount)
9054  {
9055  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9056 
9057  // 1. Process free space before this allocation.
9058  if(lastOffset < suballoc.offset)
9059  {
9060  // There is free space from lastOffset to suballoc.offset.
9061  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9062  ++outInfo.unusedRangeCount;
9063  outInfo.unusedBytes += unusedRangeSize;
9064  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9065  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9066  }
9067 
9068  // 2. Process this allocation.
9069  // There is allocation with suballoc.offset, suballoc.size.
9070  outInfo.usedBytes += suballoc.size;
9071  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9072  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9073 
9074  // 3. Prepare for next iteration.
9075  lastOffset = suballoc.offset + suballoc.size;
9076  ++nextAlloc2ndIndex;
9077  }
9078  // We are at the end.
9079  else
9080  {
9081  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9082  if(lastOffset < freeSpace2ndTo1stEnd)
9083  {
9084  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9085  ++outInfo.unusedRangeCount;
9086  outInfo.unusedBytes += unusedRangeSize;
9087  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9088  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9089  }
9090 
9091  // End of loop.
9092  lastOffset = freeSpace2ndTo1stEnd;
9093  }
9094  }
9095  }
9096 
9097  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9098  const VkDeviceSize freeSpace1stTo2ndEnd =
9099  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9100  while(lastOffset < freeSpace1stTo2ndEnd)
9101  {
9102  // Find next non-null allocation or move nextAllocIndex to the end.
9103  while(nextAlloc1stIndex < suballoc1stCount &&
9104  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9105  {
9106  ++nextAlloc1stIndex;
9107  }
9108 
9109  // Found non-null allocation.
9110  if(nextAlloc1stIndex < suballoc1stCount)
9111  {
9112  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9113 
9114  // 1. Process free space before this allocation.
9115  if(lastOffset < suballoc.offset)
9116  {
9117  // There is free space from lastOffset to suballoc.offset.
9118  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9119  ++outInfo.unusedRangeCount;
9120  outInfo.unusedBytes += unusedRangeSize;
9121  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9122  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9123  }
9124 
9125  // 2. Process this allocation.
9126  // There is allocation with suballoc.offset, suballoc.size.
9127  outInfo.usedBytes += suballoc.size;
9128  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9129  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9130 
9131  // 3. Prepare for next iteration.
9132  lastOffset = suballoc.offset + suballoc.size;
9133  ++nextAlloc1stIndex;
9134  }
9135  // We are at the end.
9136  else
9137  {
9138  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9139  if(lastOffset < freeSpace1stTo2ndEnd)
9140  {
9141  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9142  ++outInfo.unusedRangeCount;
9143  outInfo.unusedBytes += unusedRangeSize;
9144  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9145  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9146  }
9147 
9148  // End of loop.
9149  lastOffset = freeSpace1stTo2ndEnd;
9150  }
9151  }
9152 
9153  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9154  {
9155  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9156  while(lastOffset < size)
9157  {
9158  // Find next non-null allocation or move nextAllocIndex to the end.
9159  while(nextAlloc2ndIndex != SIZE_MAX &&
9160  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9161  {
9162  --nextAlloc2ndIndex;
9163  }
9164 
9165  // Found non-null allocation.
9166  if(nextAlloc2ndIndex != SIZE_MAX)
9167  {
9168  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9169 
9170  // 1. Process free space before this allocation.
9171  if(lastOffset < suballoc.offset)
9172  {
9173  // There is free space from lastOffset to suballoc.offset.
9174  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9175  ++outInfo.unusedRangeCount;
9176  outInfo.unusedBytes += unusedRangeSize;
9177  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9178  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9179  }
9180 
9181  // 2. Process this allocation.
9182  // There is allocation with suballoc.offset, suballoc.size.
9183  outInfo.usedBytes += suballoc.size;
9184  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9185  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9186 
9187  // 3. Prepare for next iteration.
9188  lastOffset = suballoc.offset + suballoc.size;
9189  --nextAlloc2ndIndex;
9190  }
9191  // We are at the end.
9192  else
9193  {
9194  // There is free space from lastOffset to size.
9195  if(lastOffset < size)
9196  {
9197  const VkDeviceSize unusedRangeSize = size - lastOffset;
9198  ++outInfo.unusedRangeCount;
9199  outInfo.unusedBytes += unusedRangeSize;
9200  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9201  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9202  }
9203 
9204  // End of loop.
9205  lastOffset = size;
9206  }
9207  }
9208  }
9209 
9210  outInfo.unusedBytes = size - outInfo.usedBytes;
9211 }
9212 
9213 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9214 {
9215  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9216  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9217  const VkDeviceSize size = GetSize();
9218  const size_t suballoc1stCount = suballocations1st.size();
9219  const size_t suballoc2ndCount = suballocations2nd.size();
9220 
9221  inoutStats.size += size;
9222 
9223  VkDeviceSize lastOffset = 0;
9224 
9225  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9226  {
9227  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9228  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9229  while(lastOffset < freeSpace2ndTo1stEnd)
9230  {
9231  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9232  while(nextAlloc2ndIndex < suballoc2ndCount &&
9233  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9234  {
9235  ++nextAlloc2ndIndex;
9236  }
9237 
9238  // Found non-null allocation.
9239  if(nextAlloc2ndIndex < suballoc2ndCount)
9240  {
9241  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9242 
9243  // 1. Process free space before this allocation.
9244  if(lastOffset < suballoc.offset)
9245  {
9246  // There is free space from lastOffset to suballoc.offset.
9247  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9248  inoutStats.unusedSize += unusedRangeSize;
9249  ++inoutStats.unusedRangeCount;
9250  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9251  }
9252 
9253  // 2. Process this allocation.
9254  // There is allocation with suballoc.offset, suballoc.size.
9255  ++inoutStats.allocationCount;
9256 
9257  // 3. Prepare for next iteration.
9258  lastOffset = suballoc.offset + suballoc.size;
9259  ++nextAlloc2ndIndex;
9260  }
9261  // We are at the end.
9262  else
9263  {
9264  if(lastOffset < freeSpace2ndTo1stEnd)
9265  {
9266  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9267  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9268  inoutStats.unusedSize += unusedRangeSize;
9269  ++inoutStats.unusedRangeCount;
9270  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9271  }
9272 
9273  // End of loop.
9274  lastOffset = freeSpace2ndTo1stEnd;
9275  }
9276  }
9277  }
9278 
9279  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9280  const VkDeviceSize freeSpace1stTo2ndEnd =
9281  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9282  while(lastOffset < freeSpace1stTo2ndEnd)
9283  {
9284  // Find next non-null allocation or move nextAllocIndex to the end.
9285  while(nextAlloc1stIndex < suballoc1stCount &&
9286  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9287  {
9288  ++nextAlloc1stIndex;
9289  }
9290 
9291  // Found non-null allocation.
9292  if(nextAlloc1stIndex < suballoc1stCount)
9293  {
9294  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9295 
9296  // 1. Process free space before this allocation.
9297  if(lastOffset < suballoc.offset)
9298  {
9299  // There is free space from lastOffset to suballoc.offset.
9300  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9301  inoutStats.unusedSize += unusedRangeSize;
9302  ++inoutStats.unusedRangeCount;
9303  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9304  }
9305 
9306  // 2. Process this allocation.
9307  // There is allocation with suballoc.offset, suballoc.size.
9308  ++inoutStats.allocationCount;
9309 
9310  // 3. Prepare for next iteration.
9311  lastOffset = suballoc.offset + suballoc.size;
9312  ++nextAlloc1stIndex;
9313  }
9314  // We are at the end.
9315  else
9316  {
9317  if(lastOffset < freeSpace1stTo2ndEnd)
9318  {
9319  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9320  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9321  inoutStats.unusedSize += unusedRangeSize;
9322  ++inoutStats.unusedRangeCount;
9323  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9324  }
9325 
9326  // End of loop.
9327  lastOffset = freeSpace1stTo2ndEnd;
9328  }
9329  }
9330 
9331  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9332  {
9333  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9334  while(lastOffset < size)
9335  {
9336  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9337  while(nextAlloc2ndIndex != SIZE_MAX &&
9338  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9339  {
9340  --nextAlloc2ndIndex;
9341  }
9342 
9343  // Found non-null allocation.
9344  if(nextAlloc2ndIndex != SIZE_MAX)
9345  {
9346  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9347 
9348  // 1. Process free space before this allocation.
9349  if(lastOffset < suballoc.offset)
9350  {
9351  // There is free space from lastOffset to suballoc.offset.
9352  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9353  inoutStats.unusedSize += unusedRangeSize;
9354  ++inoutStats.unusedRangeCount;
9355  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9356  }
9357 
9358  // 2. Process this allocation.
9359  // There is allocation with suballoc.offset, suballoc.size.
9360  ++inoutStats.allocationCount;
9361 
9362  // 3. Prepare for next iteration.
9363  lastOffset = suballoc.offset + suballoc.size;
9364  --nextAlloc2ndIndex;
9365  }
9366  // We are at the end.
9367  else
9368  {
9369  if(lastOffset < size)
9370  {
9371  // There is free space from lastOffset to size.
9372  const VkDeviceSize unusedRangeSize = size - lastOffset;
9373  inoutStats.unusedSize += unusedRangeSize;
9374  ++inoutStats.unusedRangeCount;
9375  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9376  }
9377 
9378  // End of loop.
9379  lastOffset = size;
9380  }
9381  }
9382  }
9383 }
9384 
9385 #if VMA_STATS_STRING_ENABLED
9386 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9387 {
9388  const VkDeviceSize size = GetSize();
9389  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9390  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9391  const size_t suballoc1stCount = suballocations1st.size();
9392  const size_t suballoc2ndCount = suballocations2nd.size();
9393 
9394  // FIRST PASS
9395 
9396  size_t unusedRangeCount = 0;
9397  VkDeviceSize usedBytes = 0;
9398 
9399  VkDeviceSize lastOffset = 0;
9400 
9401  size_t alloc2ndCount = 0;
9402  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9403  {
9404  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9405  size_t nextAlloc2ndIndex = 0;
9406  while(lastOffset < freeSpace2ndTo1stEnd)
9407  {
9408  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9409  while(nextAlloc2ndIndex < suballoc2ndCount &&
9410  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9411  {
9412  ++nextAlloc2ndIndex;
9413  }
9414 
9415  // Found non-null allocation.
9416  if(nextAlloc2ndIndex < suballoc2ndCount)
9417  {
9418  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9419 
9420  // 1. Process free space before this allocation.
9421  if(lastOffset < suballoc.offset)
9422  {
9423  // There is free space from lastOffset to suballoc.offset.
9424  ++unusedRangeCount;
9425  }
9426 
9427  // 2. Process this allocation.
9428  // There is allocation with suballoc.offset, suballoc.size.
9429  ++alloc2ndCount;
9430  usedBytes += suballoc.size;
9431 
9432  // 3. Prepare for next iteration.
9433  lastOffset = suballoc.offset + suballoc.size;
9434  ++nextAlloc2ndIndex;
9435  }
9436  // We are at the end.
9437  else
9438  {
9439  if(lastOffset < freeSpace2ndTo1stEnd)
9440  {
9441  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9442  ++unusedRangeCount;
9443  }
9444 
9445  // End of loop.
9446  lastOffset = freeSpace2ndTo1stEnd;
9447  }
9448  }
9449  }
9450 
9451  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9452  size_t alloc1stCount = 0;
9453  const VkDeviceSize freeSpace1stTo2ndEnd =
9454  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9455  while(lastOffset < freeSpace1stTo2ndEnd)
9456  {
9457  // Find next non-null allocation or move nextAllocIndex to the end.
9458  while(nextAlloc1stIndex < suballoc1stCount &&
9459  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9460  {
9461  ++nextAlloc1stIndex;
9462  }
9463 
9464  // Found non-null allocation.
9465  if(nextAlloc1stIndex < suballoc1stCount)
9466  {
9467  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9468 
9469  // 1. Process free space before this allocation.
9470  if(lastOffset < suballoc.offset)
9471  {
9472  // There is free space from lastOffset to suballoc.offset.
9473  ++unusedRangeCount;
9474  }
9475 
9476  // 2. Process this allocation.
9477  // There is allocation with suballoc.offset, suballoc.size.
9478  ++alloc1stCount;
9479  usedBytes += suballoc.size;
9480 
9481  // 3. Prepare for next iteration.
9482  lastOffset = suballoc.offset + suballoc.size;
9483  ++nextAlloc1stIndex;
9484  }
9485  // We are at the end.
9486  else
9487  {
9488  if(lastOffset < size)
9489  {
9490  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9491  ++unusedRangeCount;
9492  }
9493 
9494  // End of loop.
9495  lastOffset = freeSpace1stTo2ndEnd;
9496  }
9497  }
9498 
9499  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9500  {
9501  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9502  while(lastOffset < size)
9503  {
9504  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9505  while(nextAlloc2ndIndex != SIZE_MAX &&
9506  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9507  {
9508  --nextAlloc2ndIndex;
9509  }
9510 
9511  // Found non-null allocation.
9512  if(nextAlloc2ndIndex != SIZE_MAX)
9513  {
9514  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9515 
9516  // 1. Process free space before this allocation.
9517  if(lastOffset < suballoc.offset)
9518  {
9519  // There is free space from lastOffset to suballoc.offset.
9520  ++unusedRangeCount;
9521  }
9522 
9523  // 2. Process this allocation.
9524  // There is allocation with suballoc.offset, suballoc.size.
9525  ++alloc2ndCount;
9526  usedBytes += suballoc.size;
9527 
9528  // 3. Prepare for next iteration.
9529  lastOffset = suballoc.offset + suballoc.size;
9530  --nextAlloc2ndIndex;
9531  }
9532  // We are at the end.
9533  else
9534  {
9535  if(lastOffset < size)
9536  {
9537  // There is free space from lastOffset to size.
9538  ++unusedRangeCount;
9539  }
9540 
9541  // End of loop.
9542  lastOffset = size;
9543  }
9544  }
9545  }
9546 
9547  const VkDeviceSize unusedBytes = size - usedBytes;
9548  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9549 
9550  // SECOND PASS
9551  lastOffset = 0;
9552 
9553  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9554  {
9555  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9556  size_t nextAlloc2ndIndex = 0;
9557  while(lastOffset < freeSpace2ndTo1stEnd)
9558  {
9559  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9560  while(nextAlloc2ndIndex < suballoc2ndCount &&
9561  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9562  {
9563  ++nextAlloc2ndIndex;
9564  }
9565 
9566  // Found non-null allocation.
9567  if(nextAlloc2ndIndex < suballoc2ndCount)
9568  {
9569  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9570 
9571  // 1. Process free space before this allocation.
9572  if(lastOffset < suballoc.offset)
9573  {
9574  // There is free space from lastOffset to suballoc.offset.
9575  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9576  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9577  }
9578 
9579  // 2. Process this allocation.
9580  // There is allocation with suballoc.offset, suballoc.size.
9581  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9582 
9583  // 3. Prepare for next iteration.
9584  lastOffset = suballoc.offset + suballoc.size;
9585  ++nextAlloc2ndIndex;
9586  }
9587  // We are at the end.
9588  else
9589  {
9590  if(lastOffset < freeSpace2ndTo1stEnd)
9591  {
9592  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9593  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9594  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9595  }
9596 
9597  // End of loop.
9598  lastOffset = freeSpace2ndTo1stEnd;
9599  }
9600  }
9601  }
9602 
9603  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9604  while(lastOffset < freeSpace1stTo2ndEnd)
9605  {
9606  // Find next non-null allocation or move nextAllocIndex to the end.
9607  while(nextAlloc1stIndex < suballoc1stCount &&
9608  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9609  {
9610  ++nextAlloc1stIndex;
9611  }
9612 
9613  // Found non-null allocation.
9614  if(nextAlloc1stIndex < suballoc1stCount)
9615  {
9616  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9617 
9618  // 1. Process free space before this allocation.
9619  if(lastOffset < suballoc.offset)
9620  {
9621  // There is free space from lastOffset to suballoc.offset.
9622  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9623  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9624  }
9625 
9626  // 2. Process this allocation.
9627  // There is allocation with suballoc.offset, suballoc.size.
9628  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9629 
9630  // 3. Prepare for next iteration.
9631  lastOffset = suballoc.offset + suballoc.size;
9632  ++nextAlloc1stIndex;
9633  }
9634  // We are at the end.
9635  else
9636  {
9637  if(lastOffset < freeSpace1stTo2ndEnd)
9638  {
9639  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9640  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9641  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9642  }
9643 
9644  // End of loop.
9645  lastOffset = freeSpace1stTo2ndEnd;
9646  }
9647  }
9648 
9649  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9650  {
9651  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9652  while(lastOffset < size)
9653  {
9654  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9655  while(nextAlloc2ndIndex != SIZE_MAX &&
9656  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9657  {
9658  --nextAlloc2ndIndex;
9659  }
9660 
9661  // Found non-null allocation.
9662  if(nextAlloc2ndIndex != SIZE_MAX)
9663  {
9664  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9665 
9666  // 1. Process free space before this allocation.
9667  if(lastOffset < suballoc.offset)
9668  {
9669  // There is free space from lastOffset to suballoc.offset.
9670  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9671  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9672  }
9673 
9674  // 2. Process this allocation.
9675  // There is allocation with suballoc.offset, suballoc.size.
9676  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9677 
9678  // 3. Prepare for next iteration.
9679  lastOffset = suballoc.offset + suballoc.size;
9680  --nextAlloc2ndIndex;
9681  }
9682  // We are at the end.
9683  else
9684  {
9685  if(lastOffset < size)
9686  {
9687  // There is free space from lastOffset to size.
9688  const VkDeviceSize unusedRangeSize = size - lastOffset;
9689  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9690  }
9691 
9692  // End of loop.
9693  lastOffset = size;
9694  }
9695  }
9696  }
9697 
9698  PrintDetailedMap_End(json);
9699 }
9700 #endif // #if VMA_STATS_STRING_ENABLED
9701 
9702 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9703  uint32_t currentFrameIndex,
9704  uint32_t frameInUseCount,
9705  VkDeviceSize bufferImageGranularity,
9706  VkDeviceSize allocSize,
9707  VkDeviceSize allocAlignment,
9708  bool upperAddress,
9709  VmaSuballocationType allocType,
9710  bool canMakeOtherLost,
9711  uint32_t strategy,
9712  VmaAllocationRequest* pAllocationRequest)
9713 {
9714  VMA_ASSERT(allocSize > 0);
9715  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9716  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9717  VMA_HEAVY_ASSERT(Validate());
9718  return upperAddress ?
9719  CreateAllocationRequest_UpperAddress(
9720  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9721  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9722  CreateAllocationRequest_LowerAddress(
9723  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9724  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9725 }
9726 
9727 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9728  uint32_t currentFrameIndex,
9729  uint32_t frameInUseCount,
9730  VkDeviceSize bufferImageGranularity,
9731  VkDeviceSize allocSize,
9732  VkDeviceSize allocAlignment,
9733  VmaSuballocationType allocType,
9734  bool canMakeOtherLost,
9735  uint32_t strategy,
9736  VmaAllocationRequest* pAllocationRequest)
9737 {
9738  const VkDeviceSize size = GetSize();
9739  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9740  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9741 
9742  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9743  {
9744  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9745  return false;
9746  }
9747 
9748  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9749  if(allocSize > size)
9750  {
9751  return false;
9752  }
9753  VkDeviceSize resultBaseOffset = size - allocSize;
9754  if(!suballocations2nd.empty())
9755  {
9756  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9757  resultBaseOffset = lastSuballoc.offset - allocSize;
9758  if(allocSize > lastSuballoc.offset)
9759  {
9760  return false;
9761  }
9762  }
9763 
9764  // Start from offset equal to end of free space.
9765  VkDeviceSize resultOffset = resultBaseOffset;
9766 
9767  // Apply VMA_DEBUG_MARGIN at the end.
9768  if(VMA_DEBUG_MARGIN > 0)
9769  {
9770  if(resultOffset < VMA_DEBUG_MARGIN)
9771  {
9772  return false;
9773  }
9774  resultOffset -= VMA_DEBUG_MARGIN;
9775  }
9776 
9777  // Apply alignment.
9778  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9779 
9780  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9781  // Make bigger alignment if necessary.
9782  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9783  {
9784  bool bufferImageGranularityConflict = false;
9785  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9786  {
9787  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9788  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9789  {
9790  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9791  {
9792  bufferImageGranularityConflict = true;
9793  break;
9794  }
9795  }
9796  else
9797  // Already on previous page.
9798  break;
9799  }
9800  if(bufferImageGranularityConflict)
9801  {
9802  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9803  }
9804  }
9805 
9806  // There is enough free space.
9807  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9808  suballocations1st.back().offset + suballocations1st.back().size :
9809  0;
9810  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9811  {
9812  // Check previous suballocations for BufferImageGranularity conflicts.
9813  // If conflict exists, allocation cannot be made here.
9814  if(bufferImageGranularity > 1)
9815  {
9816  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9817  {
9818  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9819  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9820  {
9821  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9822  {
9823  return false;
9824  }
9825  }
9826  else
9827  {
9828  // Already on next page.
9829  break;
9830  }
9831  }
9832  }
9833 
9834  // All tests passed: Success.
9835  pAllocationRequest->offset = resultOffset;
9836  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9837  pAllocationRequest->sumItemSize = 0;
9838  // pAllocationRequest->item unused.
9839  pAllocationRequest->itemsToMakeLostCount = 0;
9840  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9841  return true;
9842  }
9843 
9844  return false;
9845 }
9846 
9847 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9848  uint32_t currentFrameIndex,
9849  uint32_t frameInUseCount,
9850  VkDeviceSize bufferImageGranularity,
9851  VkDeviceSize allocSize,
9852  VkDeviceSize allocAlignment,
9853  VmaSuballocationType allocType,
9854  bool canMakeOtherLost,
9855  uint32_t strategy,
9856  VmaAllocationRequest* pAllocationRequest)
9857 {
9858  const VkDeviceSize size = GetSize();
9859  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9860  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9861 
9862  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9863  {
9864  // Try to allocate at the end of 1st vector.
9865 
9866  VkDeviceSize resultBaseOffset = 0;
9867  if(!suballocations1st.empty())
9868  {
9869  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9870  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9871  }
9872 
9873  // Start from offset equal to beginning of free space.
9874  VkDeviceSize resultOffset = resultBaseOffset;
9875 
9876  // Apply VMA_DEBUG_MARGIN at the beginning.
9877  if(VMA_DEBUG_MARGIN > 0)
9878  {
9879  resultOffset += VMA_DEBUG_MARGIN;
9880  }
9881 
9882  // Apply alignment.
9883  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9884 
9885  // Check previous suballocations for BufferImageGranularity conflicts.
9886  // Make bigger alignment if necessary.
9887  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9888  {
9889  bool bufferImageGranularityConflict = false;
9890  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9891  {
9892  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9893  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9894  {
9895  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9896  {
9897  bufferImageGranularityConflict = true;
9898  break;
9899  }
9900  }
9901  else
9902  // Already on previous page.
9903  break;
9904  }
9905  if(bufferImageGranularityConflict)
9906  {
9907  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9908  }
9909  }
9910 
9911  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9912  suballocations2nd.back().offset : size;
9913 
9914  // There is enough free space at the end after alignment.
9915  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9916  {
9917  // Check next suballocations for BufferImageGranularity conflicts.
9918  // If conflict exists, allocation cannot be made here.
9919  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9920  {
9921  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9922  {
9923  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9924  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9925  {
9926  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9927  {
9928  return false;
9929  }
9930  }
9931  else
9932  {
9933  // Already on previous page.
9934  break;
9935  }
9936  }
9937  }
9938 
9939  // All tests passed: Success.
9940  pAllocationRequest->offset = resultOffset;
9941  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9942  pAllocationRequest->sumItemSize = 0;
9943  // pAllocationRequest->item, customData unused.
9944  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9945  pAllocationRequest->itemsToMakeLostCount = 0;
9946  return true;
9947  }
9948  }
9949 
9950  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9951  // beginning of 1st vector as the end of free space.
9952  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9953  {
9954  VMA_ASSERT(!suballocations1st.empty());
9955 
9956  VkDeviceSize resultBaseOffset = 0;
9957  if(!suballocations2nd.empty())
9958  {
9959  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9960  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9961  }
9962 
9963  // Start from offset equal to beginning of free space.
9964  VkDeviceSize resultOffset = resultBaseOffset;
9965 
9966  // Apply VMA_DEBUG_MARGIN at the beginning.
9967  if(VMA_DEBUG_MARGIN > 0)
9968  {
9969  resultOffset += VMA_DEBUG_MARGIN;
9970  }
9971 
9972  // Apply alignment.
9973  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9974 
9975  // Check previous suballocations for BufferImageGranularity conflicts.
9976  // Make bigger alignment if necessary.
9977  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9978  {
9979  bool bufferImageGranularityConflict = false;
9980  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9981  {
9982  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9983  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9984  {
9985  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9986  {
9987  bufferImageGranularityConflict = true;
9988  break;
9989  }
9990  }
9991  else
9992  // Already on previous page.
9993  break;
9994  }
9995  if(bufferImageGranularityConflict)
9996  {
9997  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9998  }
9999  }
10000 
10001  pAllocationRequest->itemsToMakeLostCount = 0;
10002  pAllocationRequest->sumItemSize = 0;
10003  size_t index1st = m_1stNullItemsBeginCount;
10004 
10005  if(canMakeOtherLost)
10006  {
10007  while(index1st < suballocations1st.size() &&
10008  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10009  {
10010  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10011  const VmaSuballocation& suballoc = suballocations1st[index1st];
10012  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10013  {
10014  // No problem.
10015  }
10016  else
10017  {
10018  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10019  if(suballoc.hAllocation->CanBecomeLost() &&
10020  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10021  {
10022  ++pAllocationRequest->itemsToMakeLostCount;
10023  pAllocationRequest->sumItemSize += suballoc.size;
10024  }
10025  else
10026  {
10027  return false;
10028  }
10029  }
10030  ++index1st;
10031  }
10032 
10033  // Check next suballocations for BufferImageGranularity conflicts.
10034  // If conflict exists, we must mark more allocations lost or fail.
10035  if(bufferImageGranularity > 1)
10036  {
10037  while(index1st < suballocations1st.size())
10038  {
10039  const VmaSuballocation& suballoc = suballocations1st[index1st];
10040  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10041  {
10042  if(suballoc.hAllocation != VK_NULL_HANDLE)
10043  {
10044  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10045  if(suballoc.hAllocation->CanBecomeLost() &&
10046  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10047  {
10048  ++pAllocationRequest->itemsToMakeLostCount;
10049  pAllocationRequest->sumItemSize += suballoc.size;
10050  }
10051  else
10052  {
10053  return false;
10054  }
10055  }
10056  }
10057  else
10058  {
10059  // Already on next page.
10060  break;
10061  }
10062  ++index1st;
10063  }
10064  }
10065 
10066  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10067  if(index1st == suballocations1st.size() &&
10068  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10069  {
10070  // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
10071  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10072  }
10073  }
10074 
10075  // There is enough free space at the end after alignment.
10076  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10077  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10078  {
10079  // Check next suballocations for BufferImageGranularity conflicts.
10080  // If conflict exists, allocation cannot be made here.
10081  if(bufferImageGranularity > 1)
10082  {
10083  for(size_t nextSuballocIndex = index1st;
10084  nextSuballocIndex < suballocations1st.size();
10085  nextSuballocIndex++)
10086  {
10087  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10088  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10089  {
10090  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10091  {
10092  return false;
10093  }
10094  }
10095  else
10096  {
10097  // Already on next page.
10098  break;
10099  }
10100  }
10101  }
10102 
10103  // All tests passed: Success.
10104  pAllocationRequest->offset = resultOffset;
10105  pAllocationRequest->sumFreeSize =
10106  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10107  - resultBaseOffset
10108  - pAllocationRequest->sumItemSize;
10109  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10110  // pAllocationRequest->item, customData unused.
10111  return true;
10112  }
10113  }
10114 
10115  return false;
10116 }
10117 
10118 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10119  uint32_t currentFrameIndex,
10120  uint32_t frameInUseCount,
10121  VmaAllocationRequest* pAllocationRequest)
10122 {
10123  if(pAllocationRequest->itemsToMakeLostCount == 0)
10124  {
10125  return true;
10126  }
10127 
10128  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10129 
10130  // We always start from 1st.
10131  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10132  size_t index = m_1stNullItemsBeginCount;
10133  size_t madeLostCount = 0;
10134  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10135  {
10136  if(index == suballocations->size())
10137  {
10138  index = 0;
10139  // If we get to the end of 1st, we wrap around to beginning of 2nd of 1st.
10140  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10141  {
10142  suballocations = &AccessSuballocations2nd();
10143  }
10144  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10145  // suballocations continues pointing at AccessSuballocations1st().
10146  VMA_ASSERT(!suballocations->empty());
10147  }
10148  VmaSuballocation& suballoc = (*suballocations)[index];
10149  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10150  {
10151  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10152  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10153  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10154  {
10155  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10156  suballoc.hAllocation = VK_NULL_HANDLE;
10157  m_SumFreeSize += suballoc.size;
10158  if(suballocations == &AccessSuballocations1st())
10159  {
10160  ++m_1stNullItemsMiddleCount;
10161  }
10162  else
10163  {
10164  ++m_2ndNullItemsCount;
10165  }
10166  ++madeLostCount;
10167  }
10168  else
10169  {
10170  return false;
10171  }
10172  }
10173  ++index;
10174  }
10175 
10176  CleanupAfterFree();
10177  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
10178 
10179  return true;
10180 }
10181 
10182 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10183 {
10184  uint32_t lostAllocationCount = 0;
10185 
10186  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10187  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10188  {
10189  VmaSuballocation& suballoc = suballocations1st[i];
10190  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10191  suballoc.hAllocation->CanBecomeLost() &&
10192  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10193  {
10194  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10195  suballoc.hAllocation = VK_NULL_HANDLE;
10196  ++m_1stNullItemsMiddleCount;
10197  m_SumFreeSize += suballoc.size;
10198  ++lostAllocationCount;
10199  }
10200  }
10201 
10202  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10203  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10204  {
10205  VmaSuballocation& suballoc = suballocations2nd[i];
10206  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10207  suballoc.hAllocation->CanBecomeLost() &&
10208  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10209  {
10210  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10211  suballoc.hAllocation = VK_NULL_HANDLE;
10212  ++m_2ndNullItemsCount;
10213  m_SumFreeSize += suballoc.size;
10214  ++lostAllocationCount;
10215  }
10216  }
10217 
10218  if(lostAllocationCount)
10219  {
10220  CleanupAfterFree();
10221  }
10222 
10223  return lostAllocationCount;
10224 }
10225 
10226 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10227 {
10228  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10229  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10230  {
10231  const VmaSuballocation& suballoc = suballocations1st[i];
10232  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10233  {
10234  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10235  {
10236  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10237  return VK_ERROR_VALIDATION_FAILED_EXT;
10238  }
10239  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10240  {
10241  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10242  return VK_ERROR_VALIDATION_FAILED_EXT;
10243  }
10244  }
10245  }
10246 
10247  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10248  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10249  {
10250  const VmaSuballocation& suballoc = suballocations2nd[i];
10251  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10252  {
10253  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10254  {
10255  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10256  return VK_ERROR_VALIDATION_FAILED_EXT;
10257  }
10258  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10259  {
10260  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10261  return VK_ERROR_VALIDATION_FAILED_EXT;
10262  }
10263  }
10264  }
10265 
10266  return VK_SUCCESS;
10267 }
10268 
10269 void VmaBlockMetadata_Linear::Alloc(
10270  const VmaAllocationRequest& request,
10271  VmaSuballocationType type,
10272  VkDeviceSize allocSize,
10273  VmaAllocation hAllocation)
10274 {
10275  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10276 
10277  switch(request.type)
10278  {
10279  case VmaAllocationRequestType::UpperAddress:
10280  {
10281  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10282  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10283  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10284  suballocations2nd.push_back(newSuballoc);
10285  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10286  }
10287  break;
10288  case VmaAllocationRequestType::EndOf1st:
10289  {
10290  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10291 
10292  VMA_ASSERT(suballocations1st.empty() ||
10293  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10294  // Check if it fits before the end of the block.
10295  VMA_ASSERT(request.offset + allocSize <= GetSize());
10296 
10297  suballocations1st.push_back(newSuballoc);
10298  }
10299  break;
10300  case VmaAllocationRequestType::EndOf2nd:
10301  {
10302  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10303  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10304  VMA_ASSERT(!suballocations1st.empty() &&
10305  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10306  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10307 
10308  switch(m_2ndVectorMode)
10309  {
10310  case SECOND_VECTOR_EMPTY:
10311  // First allocation from second part ring buffer.
10312  VMA_ASSERT(suballocations2nd.empty());
10313  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10314  break;
10315  case SECOND_VECTOR_RING_BUFFER:
10316  // 2-part ring buffer is already started.
10317  VMA_ASSERT(!suballocations2nd.empty());
10318  break;
10319  case SECOND_VECTOR_DOUBLE_STACK:
10320  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10321  break;
10322  default:
10323  VMA_ASSERT(0);
10324  }
10325 
10326  suballocations2nd.push_back(newSuballoc);
10327  }
10328  break;
10329  default:
10330  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10331  }
10332 
10333  m_SumFreeSize -= newSuballoc.size;
10334 }
10335 
10336 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10337 {
10338  FreeAtOffset(allocation->GetOffset());
10339 }
10340 
10341 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10342 {
10343  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10344  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10345 
10346  if(!suballocations1st.empty())
10347  {
10348  // First allocation: Mark it as next empty at the beginning.
10349  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10350  if(firstSuballoc.offset == offset)
10351  {
10352  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10353  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10354  m_SumFreeSize += firstSuballoc.size;
10355  ++m_1stNullItemsBeginCount;
10356  CleanupAfterFree();
10357  return;
10358  }
10359  }
10360 
10361  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10362  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10363  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10364  {
10365  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10366  if(lastSuballoc.offset == offset)
10367  {
10368  m_SumFreeSize += lastSuballoc.size;
10369  suballocations2nd.pop_back();
10370  CleanupAfterFree();
10371  return;
10372  }
10373  }
10374  // Last allocation in 1st vector.
10375  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10376  {
10377  VmaSuballocation& lastSuballoc = suballocations1st.back();
10378  if(lastSuballoc.offset == offset)
10379  {
10380  m_SumFreeSize += lastSuballoc.size;
10381  suballocations1st.pop_back();
10382  CleanupAfterFree();
10383  return;
10384  }
10385  }
10386 
10387  // Item from the middle of 1st vector.
10388  {
10389  VmaSuballocation refSuballoc;
10390  refSuballoc.offset = offset;
10391  // Rest of members stays uninitialized intentionally for better performance.
10392  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
10393  suballocations1st.begin() + m_1stNullItemsBeginCount,
10394  suballocations1st.end(),
10395  refSuballoc,
10396  VmaSuballocationOffsetLess());
10397  if(it != suballocations1st.end())
10398  {
10399  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10400  it->hAllocation = VK_NULL_HANDLE;
10401  ++m_1stNullItemsMiddleCount;
10402  m_SumFreeSize += it->size;
10403  CleanupAfterFree();
10404  return;
10405  }
10406  }
10407 
10408  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10409  {
10410  // Item from the middle of 2nd vector.
10411  VmaSuballocation refSuballoc;
10412  refSuballoc.offset = offset;
10413  // Rest of members stays uninitialized intentionally for better performance.
10414  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10415  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
10416  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
10417  if(it != suballocations2nd.end())
10418  {
10419  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10420  it->hAllocation = VK_NULL_HANDLE;
10421  ++m_2ndNullItemsCount;
10422  m_SumFreeSize += it->size;
10423  CleanupAfterFree();
10424  return;
10425  }
10426  }
10427 
10428  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10429 }
10430 
10431 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10432 {
10433  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10434  const size_t suballocCount = AccessSuballocations1st().size();
10435  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10436 }
10437 
10438 void VmaBlockMetadata_Linear::CleanupAfterFree()
10439 {
10440  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10441  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10442 
10443  if(IsEmpty())
10444  {
10445  suballocations1st.clear();
10446  suballocations2nd.clear();
10447  m_1stNullItemsBeginCount = 0;
10448  m_1stNullItemsMiddleCount = 0;
10449  m_2ndNullItemsCount = 0;
10450  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10451  }
10452  else
10453  {
10454  const size_t suballoc1stCount = suballocations1st.size();
10455  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10456  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10457 
10458  // Find more null items at the beginning of 1st vector.
10459  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10460  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10461  {
10462  ++m_1stNullItemsBeginCount;
10463  --m_1stNullItemsMiddleCount;
10464  }
10465 
10466  // Find more null items at the end of 1st vector.
10467  while(m_1stNullItemsMiddleCount > 0 &&
10468  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10469  {
10470  --m_1stNullItemsMiddleCount;
10471  suballocations1st.pop_back();
10472  }
10473 
10474  // Find more null items at the end of 2nd vector.
10475  while(m_2ndNullItemsCount > 0 &&
10476  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10477  {
10478  --m_2ndNullItemsCount;
10479  suballocations2nd.pop_back();
10480  }
10481 
10482  // Find more null items at the beginning of 2nd vector.
10483  while(m_2ndNullItemsCount > 0 &&
10484  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10485  {
10486  --m_2ndNullItemsCount;
10487  VmaVectorRemove(suballocations2nd, 0);
10488  }
10489 
10490  if(ShouldCompact1st())
10491  {
10492  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10493  size_t srcIndex = m_1stNullItemsBeginCount;
10494  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10495  {
10496  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10497  {
10498  ++srcIndex;
10499  }
10500  if(dstIndex != srcIndex)
10501  {
10502  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10503  }
10504  ++srcIndex;
10505  }
10506  suballocations1st.resize(nonNullItemCount);
10507  m_1stNullItemsBeginCount = 0;
10508  m_1stNullItemsMiddleCount = 0;
10509  }
10510 
10511  // 2nd vector became empty.
10512  if(suballocations2nd.empty())
10513  {
10514  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10515  }
10516 
10517  // 1st vector became empty.
10518  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10519  {
10520  suballocations1st.clear();
10521  m_1stNullItemsBeginCount = 0;
10522 
10523  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10524  {
10525  // Swap 1st with 2nd. Now 2nd is empty.
10526  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10527  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10528  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10529  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10530  {
10531  ++m_1stNullItemsBeginCount;
10532  --m_1stNullItemsMiddleCount;
10533  }
10534  m_2ndNullItemsCount = 0;
10535  m_1stVectorIndex ^= 1;
10536  }
10537  }
10538  }
10539 
10540  VMA_HEAVY_ASSERT(Validate());
10541 }
10542 
10543 
10545 // class VmaBlockMetadata_Buddy
10546 
10547 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10548  VmaBlockMetadata(hAllocator),
10549  m_Root(VMA_NULL),
10550  m_AllocationCount(0),
10551  m_FreeCount(1),
10552  m_SumFreeSize(0)
10553 {
10554  memset(m_FreeList, 0, sizeof(m_FreeList));
10555 }
10556 
10557 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10558 {
10559  DeleteNode(m_Root);
10560 }
10561 
10562 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10563 {
10564  VmaBlockMetadata::Init(size);
10565 
10566  m_UsableSize = VmaPrevPow2(size);
10567  m_SumFreeSize = m_UsableSize;
10568 
10569  // Calculate m_LevelCount.
10570  m_LevelCount = 1;
10571  while(m_LevelCount < MAX_LEVELS &&
10572  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10573  {
10574  ++m_LevelCount;
10575  }
10576 
10577  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10578  rootNode->offset = 0;
10579  rootNode->type = Node::TYPE_FREE;
10580  rootNode->parent = VMA_NULL;
10581  rootNode->buddy = VMA_NULL;
10582 
10583  m_Root = rootNode;
10584  AddToFreeListFront(0, rootNode);
10585 }
10586 
10587 bool VmaBlockMetadata_Buddy::Validate() const
10588 {
10589  // Validate tree.
10590  ValidationContext ctx;
10591  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10592  {
10593  VMA_VALIDATE(false && "ValidateNode failed.");
10594  }
10595  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10596  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10597 
10598  // Validate free node lists.
10599  for(uint32_t level = 0; level < m_LevelCount; ++level)
10600  {
10601  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10602  m_FreeList[level].front->free.prev == VMA_NULL);
10603 
10604  for(Node* node = m_FreeList[level].front;
10605  node != VMA_NULL;
10606  node = node->free.next)
10607  {
10608  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10609 
10610  if(node->free.next == VMA_NULL)
10611  {
10612  VMA_VALIDATE(m_FreeList[level].back == node);
10613  }
10614  else
10615  {
10616  VMA_VALIDATE(node->free.next->free.prev == node);
10617  }
10618  }
10619  }
10620 
10621  // Validate that free lists ar higher levels are empty.
10622  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10623  {
10624  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10625  }
10626 
10627  return true;
10628 }
10629 
10630 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10631 {
10632  for(uint32_t level = 0; level < m_LevelCount; ++level)
10633  {
10634  if(m_FreeList[level].front != VMA_NULL)
10635  {
10636  return LevelToNodeSize(level);
10637  }
10638  }
10639  return 0;
10640 }
10641 
10642 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10643 {
10644  const VkDeviceSize unusableSize = GetUnusableSize();
10645 
10646  outInfo.blockCount = 1;
10647 
10648  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10649  outInfo.usedBytes = outInfo.unusedBytes = 0;
10650 
10651  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10652  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10653  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10654 
10655  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10656 
10657  if(unusableSize > 0)
10658  {
10659  ++outInfo.unusedRangeCount;
10660  outInfo.unusedBytes += unusableSize;
10661  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10662  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10663  }
10664 }
10665 
10666 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10667 {
10668  const VkDeviceSize unusableSize = GetUnusableSize();
10669 
10670  inoutStats.size += GetSize();
10671  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10672  inoutStats.allocationCount += m_AllocationCount;
10673  inoutStats.unusedRangeCount += m_FreeCount;
10674  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10675 
10676  if(unusableSize > 0)
10677  {
10678  ++inoutStats.unusedRangeCount;
10679  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10680  }
10681 }
10682 
10683 #if VMA_STATS_STRING_ENABLED
10684 
10685 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10686 {
10687  // TODO optimize
10688  VmaStatInfo stat;
10689  CalcAllocationStatInfo(stat);
10690 
10691  PrintDetailedMap_Begin(
10692  json,
10693  stat.unusedBytes,
10694  stat.allocationCount,
10695  stat.unusedRangeCount);
10696 
10697  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10698 
10699  const VkDeviceSize unusableSize = GetUnusableSize();
10700  if(unusableSize > 0)
10701  {
10702  PrintDetailedMap_UnusedRange(json,
10703  m_UsableSize, // offset
10704  unusableSize); // size
10705  }
10706 
10707  PrintDetailedMap_End(json);
10708 }
10709 
10710 #endif // #if VMA_STATS_STRING_ENABLED
10711 
10712 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10713  uint32_t currentFrameIndex,
10714  uint32_t frameInUseCount,
10715  VkDeviceSize bufferImageGranularity,
10716  VkDeviceSize allocSize,
10717  VkDeviceSize allocAlignment,
10718  bool upperAddress,
10719  VmaSuballocationType allocType,
10720  bool canMakeOtherLost,
10721  uint32_t strategy,
10722  VmaAllocationRequest* pAllocationRequest)
10723 {
10724  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10725 
10726  // Simple way to respect bufferImageGranularity. May be optimized some day.
10727  // Whenever it might be an OPTIMAL image...
10728  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10729  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10730  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10731  {
10732  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10733  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10734  }
10735 
10736  if(allocSize > m_UsableSize)
10737  {
10738  return false;
10739  }
10740 
10741  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10742  for(uint32_t level = targetLevel + 1; level--; )
10743  {
10744  for(Node* freeNode = m_FreeList[level].front;
10745  freeNode != VMA_NULL;
10746  freeNode = freeNode->free.next)
10747  {
10748  if(freeNode->offset % allocAlignment == 0)
10749  {
10750  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10751  pAllocationRequest->offset = freeNode->offset;
10752  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10753  pAllocationRequest->sumItemSize = 0;
10754  pAllocationRequest->itemsToMakeLostCount = 0;
10755  pAllocationRequest->customData = (void*)(uintptr_t)level;
10756  return true;
10757  }
10758  }
10759  }
10760 
10761  return false;
10762 }
10763 
10764 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10765  uint32_t currentFrameIndex,
10766  uint32_t frameInUseCount,
10767  VmaAllocationRequest* pAllocationRequest)
10768 {
10769  /*
10770  Lost allocations are not supported in buddy allocator at the moment.
10771  Support might be added in the future.
10772  */
10773  return pAllocationRequest->itemsToMakeLostCount == 0;
10774 }
10775 
10776 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10777 {
10778  /*
10779  Lost allocations are not supported in buddy allocator at the moment.
10780  Support might be added in the future.
10781  */
10782  return 0;
10783 }
10784 
10785 void VmaBlockMetadata_Buddy::Alloc(
10786  const VmaAllocationRequest& request,
10787  VmaSuballocationType type,
10788  VkDeviceSize allocSize,
10789  VmaAllocation hAllocation)
10790 {
10791  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10792 
10793  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10794  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10795 
10796  Node* currNode = m_FreeList[currLevel].front;
10797  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10798  while(currNode->offset != request.offset)
10799  {
10800  currNode = currNode->free.next;
10801  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10802  }
10803 
10804  // Go down, splitting free nodes.
10805  while(currLevel < targetLevel)
10806  {
10807  // currNode is already first free node at currLevel.
10808  // Remove it from list of free nodes at this currLevel.
10809  RemoveFromFreeList(currLevel, currNode);
10810 
10811  const uint32_t childrenLevel = currLevel + 1;
10812 
10813  // Create two free sub-nodes.
10814  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10815  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10816 
10817  leftChild->offset = currNode->offset;
10818  leftChild->type = Node::TYPE_FREE;
10819  leftChild->parent = currNode;
10820  leftChild->buddy = rightChild;
10821 
10822  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10823  rightChild->type = Node::TYPE_FREE;
10824  rightChild->parent = currNode;
10825  rightChild->buddy = leftChild;
10826 
10827  // Convert current currNode to split type.
10828  currNode->type = Node::TYPE_SPLIT;
10829  currNode->split.leftChild = leftChild;
10830 
10831  // Add child nodes to free list. Order is important!
10832  AddToFreeListFront(childrenLevel, rightChild);
10833  AddToFreeListFront(childrenLevel, leftChild);
10834 
10835  ++m_FreeCount;
10836  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10837  ++currLevel;
10838  currNode = m_FreeList[currLevel].front;
10839 
10840  /*
10841  We can be sure that currNode, as left child of node previously split,
10842  also fullfills the alignment requirement.
10843  */
10844  }
10845 
10846  // Remove from free list.
10847  VMA_ASSERT(currLevel == targetLevel &&
10848  currNode != VMA_NULL &&
10849  currNode->type == Node::TYPE_FREE);
10850  RemoveFromFreeList(currLevel, currNode);
10851 
10852  // Convert to allocation node.
10853  currNode->type = Node::TYPE_ALLOCATION;
10854  currNode->allocation.alloc = hAllocation;
10855 
10856  ++m_AllocationCount;
10857  --m_FreeCount;
10858  m_SumFreeSize -= allocSize;
10859 }
10860 
10861 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10862 {
10863  if(node->type == Node::TYPE_SPLIT)
10864  {
10865  DeleteNode(node->split.leftChild->buddy);
10866  DeleteNode(node->split.leftChild);
10867  }
10868 
10869  vma_delete(GetAllocationCallbacks(), node);
10870 }
10871 
10872 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10873 {
10874  VMA_VALIDATE(level < m_LevelCount);
10875  VMA_VALIDATE(curr->parent == parent);
10876  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10877  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10878  switch(curr->type)
10879  {
10880  case Node::TYPE_FREE:
10881  // curr->free.prev, next are validated separately.
10882  ctx.calculatedSumFreeSize += levelNodeSize;
10883  ++ctx.calculatedFreeCount;
10884  break;
10885  case Node::TYPE_ALLOCATION:
10886  ++ctx.calculatedAllocationCount;
10887  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10888  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10889  break;
10890  case Node::TYPE_SPLIT:
10891  {
10892  const uint32_t childrenLevel = level + 1;
10893  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10894  const Node* const leftChild = curr->split.leftChild;
10895  VMA_VALIDATE(leftChild != VMA_NULL);
10896  VMA_VALIDATE(leftChild->offset == curr->offset);
10897  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10898  {
10899  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10900  }
10901  const Node* const rightChild = leftChild->buddy;
10902  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10903  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10904  {
10905  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10906  }
10907  }
10908  break;
10909  default:
10910  return false;
10911  }
10912 
10913  return true;
10914 }
10915 
10916 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10917 {
10918  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10919  uint32_t level = 0;
10920  VkDeviceSize currLevelNodeSize = m_UsableSize;
10921  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10922  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10923  {
10924  ++level;
10925  currLevelNodeSize = nextLevelNodeSize;
10926  nextLevelNodeSize = currLevelNodeSize >> 1;
10927  }
10928  return level;
10929 }
10930 
10931 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10932 {
10933  // Find node and level.
10934  Node* node = m_Root;
10935  VkDeviceSize nodeOffset = 0;
10936  uint32_t level = 0;
10937  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10938  while(node->type == Node::TYPE_SPLIT)
10939  {
10940  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10941  if(offset < nodeOffset + nextLevelSize)
10942  {
10943  node = node->split.leftChild;
10944  }
10945  else
10946  {
10947  node = node->split.leftChild->buddy;
10948  nodeOffset += nextLevelSize;
10949  }
10950  ++level;
10951  levelNodeSize = nextLevelSize;
10952  }
10953 
10954  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10955  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10956 
10957  ++m_FreeCount;
10958  --m_AllocationCount;
10959  m_SumFreeSize += alloc->GetSize();
10960 
10961  node->type = Node::TYPE_FREE;
10962 
10963  // Join free nodes if possible.
10964  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10965  {
10966  RemoveFromFreeList(level, node->buddy);
10967  Node* const parent = node->parent;
10968 
10969  vma_delete(GetAllocationCallbacks(), node->buddy);
10970  vma_delete(GetAllocationCallbacks(), node);
10971  parent->type = Node::TYPE_FREE;
10972 
10973  node = parent;
10974  --level;
10975  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10976  --m_FreeCount;
10977  }
10978 
10979  AddToFreeListFront(level, node);
10980 }
10981 
10982 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10983 {
10984  switch(node->type)
10985  {
10986  case Node::TYPE_FREE:
10987  ++outInfo.unusedRangeCount;
10988  outInfo.unusedBytes += levelNodeSize;
10989  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10990  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
10991  break;
10992  case Node::TYPE_ALLOCATION:
10993  {
10994  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10995  ++outInfo.allocationCount;
10996  outInfo.usedBytes += allocSize;
10997  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10998  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
10999 
11000  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11001  if(unusedRangeSize > 0)
11002  {
11003  ++outInfo.unusedRangeCount;
11004  outInfo.unusedBytes += unusedRangeSize;
11005  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11006  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11007  }
11008  }
11009  break;
11010  case Node::TYPE_SPLIT:
11011  {
11012  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11013  const Node* const leftChild = node->split.leftChild;
11014  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11015  const Node* const rightChild = leftChild->buddy;
11016  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11017  }
11018  break;
11019  default:
11020  VMA_ASSERT(0);
11021  }
11022 }
11023 
11024 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11025 {
11026  VMA_ASSERT(node->type == Node::TYPE_FREE);
11027 
11028  // List is empty.
11029  Node* const frontNode = m_FreeList[level].front;
11030  if(frontNode == VMA_NULL)
11031  {
11032  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11033  node->free.prev = node->free.next = VMA_NULL;
11034  m_FreeList[level].front = m_FreeList[level].back = node;
11035  }
11036  else
11037  {
11038  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11039  node->free.prev = VMA_NULL;
11040  node->free.next = frontNode;
11041  frontNode->free.prev = node;
11042  m_FreeList[level].front = node;
11043  }
11044 }
11045 
11046 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11047 {
11048  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11049 
11050  // It is at the front.
11051  if(node->free.prev == VMA_NULL)
11052  {
11053  VMA_ASSERT(m_FreeList[level].front == node);
11054  m_FreeList[level].front = node->free.next;
11055  }
11056  else
11057  {
11058  Node* const prevFreeNode = node->free.prev;
11059  VMA_ASSERT(prevFreeNode->free.next == node);
11060  prevFreeNode->free.next = node->free.next;
11061  }
11062 
11063  // It is at the back.
11064  if(node->free.next == VMA_NULL)
11065  {
11066  VMA_ASSERT(m_FreeList[level].back == node);
11067  m_FreeList[level].back = node->free.prev;
11068  }
11069  else
11070  {
11071  Node* const nextFreeNode = node->free.next;
11072  VMA_ASSERT(nextFreeNode->free.prev == node);
11073  nextFreeNode->free.prev = node->free.prev;
11074  }
11075 }
11076 
11077 #if VMA_STATS_STRING_ENABLED
11078 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11079 {
11080  switch(node->type)
11081  {
11082  case Node::TYPE_FREE:
11083  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11084  break;
11085  case Node::TYPE_ALLOCATION:
11086  {
11087  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11088  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11089  if(allocSize < levelNodeSize)
11090  {
11091  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11092  }
11093  }
11094  break;
11095  case Node::TYPE_SPLIT:
11096  {
11097  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11098  const Node* const leftChild = node->split.leftChild;
11099  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11100  const Node* const rightChild = leftChild->buddy;
11101  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11102  }
11103  break;
11104  default:
11105  VMA_ASSERT(0);
11106  }
11107 }
11108 #endif // #if VMA_STATS_STRING_ENABLED
11109 
11110 
11112 // class VmaDeviceMemoryBlock
11113 
11114 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11115  m_pMetadata(VMA_NULL),
11116  m_MemoryTypeIndex(UINT32_MAX),
11117  m_Id(0),
11118  m_hMemory(VK_NULL_HANDLE),
11119  m_MapCount(0),
11120  m_pMappedData(VMA_NULL)
11121 {
11122 }
11123 
11124 void VmaDeviceMemoryBlock::Init(
11125  VmaAllocator hAllocator,
11126  VmaPool hParentPool,
11127  uint32_t newMemoryTypeIndex,
11128  VkDeviceMemory newMemory,
11129  VkDeviceSize newSize,
11130  uint32_t id,
11131  uint32_t algorithm)
11132 {
11133  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11134 
11135  m_hParentPool = hParentPool;
11136  m_MemoryTypeIndex = newMemoryTypeIndex;
11137  m_Id = id;
11138  m_hMemory = newMemory;
11139 
11140  switch(algorithm)
11141  {
11143  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11144  break;
11146  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11147  break;
11148  default:
11149  VMA_ASSERT(0);
11150  // Fall-through.
11151  case 0:
11152  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11153  }
11154  m_pMetadata->Init(newSize);
11155 }
11156 
11157 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11158 {
11159  // This is the most important assert in the entire library.
11160  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11161  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11162 
11163  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11164  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11165  m_hMemory = VK_NULL_HANDLE;
11166 
11167  vma_delete(allocator, m_pMetadata);
11168  m_pMetadata = VMA_NULL;
11169 }
11170 
11171 bool VmaDeviceMemoryBlock::Validate() const
11172 {
11173  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11174  (m_pMetadata->GetSize() != 0));
11175 
11176  return m_pMetadata->Validate();
11177 }
11178 
11179 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11180 {
11181  void* pData = nullptr;
11182  VkResult res = Map(hAllocator, 1, &pData);
11183  if(res != VK_SUCCESS)
11184  {
11185  return res;
11186  }
11187 
11188  res = m_pMetadata->CheckCorruption(pData);
11189 
11190  Unmap(hAllocator, 1);
11191 
11192  return res;
11193 }
11194 
11195 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11196 {
11197  if(count == 0)
11198  {
11199  return VK_SUCCESS;
11200  }
11201 
11202  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11203  if(m_MapCount != 0)
11204  {
11205  m_MapCount += count;
11206  VMA_ASSERT(m_pMappedData != VMA_NULL);
11207  if(ppData != VMA_NULL)
11208  {
11209  *ppData = m_pMappedData;
11210  }
11211  return VK_SUCCESS;
11212  }
11213  else
11214  {
11215  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11216  hAllocator->m_hDevice,
11217  m_hMemory,
11218  0, // offset
11219  VK_WHOLE_SIZE,
11220  0, // flags
11221  &m_pMappedData);
11222  if(result == VK_SUCCESS)
11223  {
11224  if(ppData != VMA_NULL)
11225  {
11226  *ppData = m_pMappedData;
11227  }
11228  m_MapCount = count;
11229  }
11230  return result;
11231  }
11232 }
11233 
11234 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11235 {
11236  if(count == 0)
11237  {
11238  return;
11239  }
11240 
11241  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11242  if(m_MapCount >= count)
11243  {
11244  m_MapCount -= count;
11245  if(m_MapCount == 0)
11246  {
11247  m_pMappedData = VMA_NULL;
11248  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11249  }
11250  }
11251  else
11252  {
11253  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11254  }
11255 }
11256 
11257 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11258 {
11259  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11260  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11261 
11262  void* pData;
11263  VkResult res = Map(hAllocator, 1, &pData);
11264  if(res != VK_SUCCESS)
11265  {
11266  return res;
11267  }
11268 
11269  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11270  VmaWriteMagicValue(pData, allocOffset + allocSize);
11271 
11272  Unmap(hAllocator, 1);
11273 
11274  return VK_SUCCESS;
11275 }
11276 
11277 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11278 {
11279  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11280  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11281 
11282  void* pData;
11283  VkResult res = Map(hAllocator, 1, &pData);
11284  if(res != VK_SUCCESS)
11285  {
11286  return res;
11287  }
11288 
11289  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11290  {
11291  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11292  }
11293  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11294  {
11295  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11296  }
11297 
11298  Unmap(hAllocator, 1);
11299 
11300  return VK_SUCCESS;
11301 }
11302 
11303 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11304  const VmaAllocator hAllocator,
11305  const VmaAllocation hAllocation,
11306  VkDeviceSize allocationLocalOffset,
11307  VkBuffer hBuffer,
11308  const void* pNext)
11309 {
11310  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11311  hAllocation->GetBlock() == this);
11312  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11313  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11314  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11315  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11316  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11317  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11318 }
11319 
11320 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11321  const VmaAllocator hAllocator,
11322  const VmaAllocation hAllocation,
11323  VkDeviceSize allocationLocalOffset,
11324  VkImage hImage,
11325  const void* pNext)
11326 {
11327  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11328  hAllocation->GetBlock() == this);
11329  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11330  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11331  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11332  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11333  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11334  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11335 }
11336 
11337 static void InitStatInfo(VmaStatInfo& outInfo)
11338 {
11339  memset(&outInfo, 0, sizeof(outInfo));
11340  outInfo.allocationSizeMin = UINT64_MAX;
11341  outInfo.unusedRangeSizeMin = UINT64_MAX;
11342 }
11343 
11344 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11345 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11346 {
11347  inoutInfo.blockCount += srcInfo.blockCount;
11348  inoutInfo.allocationCount += srcInfo.allocationCount;
11349  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11350  inoutInfo.usedBytes += srcInfo.usedBytes;
11351  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11352  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11353  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11354  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11355  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11356 }
11357 
11358 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11359 {
11360  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11361  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11362  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11363  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11364 }
11365 
11366 VmaPool_T::VmaPool_T(
11367  VmaAllocator hAllocator,
11368  const VmaPoolCreateInfo& createInfo,
11369  VkDeviceSize preferredBlockSize) :
11370  m_BlockVector(
11371  hAllocator,
11372  this, // hParentPool
11373  createInfo.memoryTypeIndex,
11374  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11375  createInfo.minBlockCount,
11376  createInfo.maxBlockCount,
11377  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11378  createInfo.frameInUseCount,
11379  true, // isCustomPool
11380  createInfo.blockSize != 0, // explicitBlockSize
11381  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11382  m_Id(0)
11383 {
11384 }
11385 
11386 VmaPool_T::~VmaPool_T()
11387 {
11388 }
11389 
11390 #if VMA_STATS_STRING_ENABLED
11391 
11392 #endif // #if VMA_STATS_STRING_ENABLED
11393 
11394 VmaBlockVector::VmaBlockVector(
11395  VmaAllocator hAllocator,
11396  VmaPool hParentPool,
11397  uint32_t memoryTypeIndex,
11398  VkDeviceSize preferredBlockSize,
11399  size_t minBlockCount,
11400  size_t maxBlockCount,
11401  VkDeviceSize bufferImageGranularity,
11402  uint32_t frameInUseCount,
11403  bool isCustomPool,
11404  bool explicitBlockSize,
11405  uint32_t algorithm) :
11406  m_hAllocator(hAllocator),
11407  m_hParentPool(hParentPool),
11408  m_MemoryTypeIndex(memoryTypeIndex),
11409  m_PreferredBlockSize(preferredBlockSize),
11410  m_MinBlockCount(minBlockCount),
11411  m_MaxBlockCount(maxBlockCount),
11412  m_BufferImageGranularity(bufferImageGranularity),
11413  m_FrameInUseCount(frameInUseCount),
11414  m_IsCustomPool(isCustomPool),
11415  m_ExplicitBlockSize(explicitBlockSize),
11416  m_Algorithm(algorithm),
11417  m_HasEmptyBlock(false),
11418  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11419  m_NextBlockId(0)
11420 {
11421 }
11422 
11423 VmaBlockVector::~VmaBlockVector()
11424 {
11425  for(size_t i = m_Blocks.size(); i--; )
11426  {
11427  m_Blocks[i]->Destroy(m_hAllocator);
11428  vma_delete(m_hAllocator, m_Blocks[i]);
11429  }
11430 }
11431 
11432 VkResult VmaBlockVector::CreateMinBlocks()
11433 {
11434  for(size_t i = 0; i < m_MinBlockCount; ++i)
11435  {
11436  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11437  if(res != VK_SUCCESS)
11438  {
11439  return res;
11440  }
11441  }
11442  return VK_SUCCESS;
11443 }
11444 
11445 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11446 {
11447  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11448 
11449  const size_t blockCount = m_Blocks.size();
11450 
11451  pStats->size = 0;
11452  pStats->unusedSize = 0;
11453  pStats->allocationCount = 0;
11454  pStats->unusedRangeCount = 0;
11455  pStats->unusedRangeSizeMax = 0;
11456  pStats->blockCount = blockCount;
11457 
11458  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11459  {
11460  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11461  VMA_ASSERT(pBlock);
11462  VMA_HEAVY_ASSERT(pBlock->Validate());
11463  pBlock->m_pMetadata->AddPoolStats(*pStats);
11464  }
11465 }
11466 
11467 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11468 {
11469  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11470  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11471  (VMA_DEBUG_MARGIN > 0) &&
11472  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11473  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11474 }
11475 
11476 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11477 
11478 VkResult VmaBlockVector::Allocate(
11479  uint32_t currentFrameIndex,
11480  VkDeviceSize size,
11481  VkDeviceSize alignment,
11482  const VmaAllocationCreateInfo& createInfo,
11483  VmaSuballocationType suballocType,
11484  size_t allocationCount,
11485  VmaAllocation* pAllocations)
11486 {
11487  size_t allocIndex;
11488  VkResult res = VK_SUCCESS;
11489 
11490  if(IsCorruptionDetectionEnabled())
11491  {
11492  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11493  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11494  }
11495 
11496  {
11497  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11498  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11499  {
11500  res = AllocatePage(
11501  currentFrameIndex,
11502  size,
11503  alignment,
11504  createInfo,
11505  suballocType,
11506  pAllocations + allocIndex);
11507  if(res != VK_SUCCESS)
11508  {
11509  break;
11510  }
11511  }
11512  }
11513 
11514  if(res != VK_SUCCESS)
11515  {
11516  // Free all already created allocations.
11517  while(allocIndex--)
11518  {
11519  Free(pAllocations[allocIndex]);
11520  }
11521  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11522  }
11523 
11524  return res;
11525 }
11526 
11527 VkResult VmaBlockVector::AllocatePage(
11528  uint32_t currentFrameIndex,
11529  VkDeviceSize size,
11530  VkDeviceSize alignment,
11531  const VmaAllocationCreateInfo& createInfo,
11532  VmaSuballocationType suballocType,
11533  VmaAllocation* pAllocation)
11534 {
11535  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11536  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11537  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11538  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11539  const bool canCreateNewBlock =
11540  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11541  (m_Blocks.size() < m_MaxBlockCount);
11542  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11543 
11544  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11545  // Which in turn is available only when maxBlockCount = 1.
11546  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11547  {
11548  canMakeOtherLost = false;
11549  }
11550 
11551  // Upper address can only be used with linear allocator and within single memory block.
11552  if(isUpperAddress &&
11553  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11554  {
11555  return VK_ERROR_FEATURE_NOT_PRESENT;
11556  }
11557 
11558  // Validate strategy.
11559  switch(strategy)
11560  {
11561  case 0:
11563  break;
11567  break;
11568  default:
11569  return VK_ERROR_FEATURE_NOT_PRESENT;
11570  }
11571 
11572  // Early reject: requested allocation size is larger that maximum block size for this block vector.
11573  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11574  {
11575  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11576  }
11577 
11578  /*
11579  Under certain condition, this whole section can be skipped for optimization, so
11580  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11581  e.g. for custom pools with linear algorithm.
11582  */
11583  if(!canMakeOtherLost || canCreateNewBlock)
11584  {
11585  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11586  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11588 
11589  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11590  {
11591  // Use only last block.
11592  if(!m_Blocks.empty())
11593  {
11594  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11595  VMA_ASSERT(pCurrBlock);
11596  VkResult res = AllocateFromBlock(
11597  pCurrBlock,
11598  currentFrameIndex,
11599  size,
11600  alignment,
11601  allocFlagsCopy,
11602  createInfo.pUserData,
11603  suballocType,
11604  strategy,
11605  pAllocation);
11606  if(res == VK_SUCCESS)
11607  {
11608  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11609  return VK_SUCCESS;
11610  }
11611  }
11612  }
11613  else
11614  {
11616  {
11617  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11618  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11619  {
11620  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11621  VMA_ASSERT(pCurrBlock);
11622  VkResult res = AllocateFromBlock(
11623  pCurrBlock,
11624  currentFrameIndex,
11625  size,
11626  alignment,
11627  allocFlagsCopy,
11628  createInfo.pUserData,
11629  suballocType,
11630  strategy,
11631  pAllocation);
11632  if(res == VK_SUCCESS)
11633  {
11634  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11635  return VK_SUCCESS;
11636  }
11637  }
11638  }
11639  else // WORST_FIT, FIRST_FIT
11640  {
11641  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11642  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11643  {
11644  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11645  VMA_ASSERT(pCurrBlock);
11646  VkResult res = AllocateFromBlock(
11647  pCurrBlock,
11648  currentFrameIndex,
11649  size,
11650  alignment,
11651  allocFlagsCopy,
11652  createInfo.pUserData,
11653  suballocType,
11654  strategy,
11655  pAllocation);
11656  if(res == VK_SUCCESS)
11657  {
11658  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11659  return VK_SUCCESS;
11660  }
11661  }
11662  }
11663  }
11664 
11665  // 2. Try to create new block.
11666  if(canCreateNewBlock)
11667  {
11668  // Calculate optimal size for new block.
11669  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11670  uint32_t newBlockSizeShift = 0;
11671  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11672 
11673  if(!m_ExplicitBlockSize)
11674  {
11675  // Allocate 1/8, 1/4, 1/2 as first blocks.
11676  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11677  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11678  {
11679  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11680  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11681  {
11682  newBlockSize = smallerNewBlockSize;
11683  ++newBlockSizeShift;
11684  }
11685  else
11686  {
11687  break;
11688  }
11689  }
11690  }
11691 
11692  size_t newBlockIndex = 0;
11693  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11694  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11695  if(!m_ExplicitBlockSize)
11696  {
11697  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11698  {
11699  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11700  if(smallerNewBlockSize >= size)
11701  {
11702  newBlockSize = smallerNewBlockSize;
11703  ++newBlockSizeShift;
11704  res = CreateBlock(newBlockSize, &newBlockIndex);
11705  }
11706  else
11707  {
11708  break;
11709  }
11710  }
11711  }
11712 
11713  if(res == VK_SUCCESS)
11714  {
11715  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11716  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11717 
11718  res = AllocateFromBlock(
11719  pBlock,
11720  currentFrameIndex,
11721  size,
11722  alignment,
11723  allocFlagsCopy,
11724  createInfo.pUserData,
11725  suballocType,
11726  strategy,
11727  pAllocation);
11728  if(res == VK_SUCCESS)
11729  {
11730  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11731  return VK_SUCCESS;
11732  }
11733  else
11734  {
11735  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11736  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11737  }
11738  }
11739  }
11740  }
11741 
11742  // 3. Try to allocate from existing blocks with making other allocations lost.
11743  if(canMakeOtherLost)
11744  {
11745  uint32_t tryIndex = 0;
11746  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11747  {
11748  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11749  VmaAllocationRequest bestRequest = {};
11750  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11751 
11752  // 1. Search existing allocations.
11754  {
11755  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11756  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11757  {
11758  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11759  VMA_ASSERT(pCurrBlock);
11760  VmaAllocationRequest currRequest = {};
11761  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11762  currentFrameIndex,
11763  m_FrameInUseCount,
11764  m_BufferImageGranularity,
11765  size,
11766  alignment,
11767  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11768  suballocType,
11769  canMakeOtherLost,
11770  strategy,
11771  &currRequest))
11772  {
11773  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11774  if(pBestRequestBlock == VMA_NULL ||
11775  currRequestCost < bestRequestCost)
11776  {
11777  pBestRequestBlock = pCurrBlock;
11778  bestRequest = currRequest;
11779  bestRequestCost = currRequestCost;
11780 
11781  if(bestRequestCost == 0)
11782  {
11783  break;
11784  }
11785  }
11786  }
11787  }
11788  }
11789  else // WORST_FIT, FIRST_FIT
11790  {
11791  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11792  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11793  {
11794  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11795  VMA_ASSERT(pCurrBlock);
11796  VmaAllocationRequest currRequest = {};
11797  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11798  currentFrameIndex,
11799  m_FrameInUseCount,
11800  m_BufferImageGranularity,
11801  size,
11802  alignment,
11803  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11804  suballocType,
11805  canMakeOtherLost,
11806  strategy,
11807  &currRequest))
11808  {
11809  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11810  if(pBestRequestBlock == VMA_NULL ||
11811  currRequestCost < bestRequestCost ||
11813  {
11814  pBestRequestBlock = pCurrBlock;
11815  bestRequest = currRequest;
11816  bestRequestCost = currRequestCost;
11817 
11818  if(bestRequestCost == 0 ||
11820  {
11821  break;
11822  }
11823  }
11824  }
11825  }
11826  }
11827 
11828  if(pBestRequestBlock != VMA_NULL)
11829  {
11830  if(mapped)
11831  {
11832  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11833  if(res != VK_SUCCESS)
11834  {
11835  return res;
11836  }
11837  }
11838 
11839  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11840  currentFrameIndex,
11841  m_FrameInUseCount,
11842  &bestRequest))
11843  {
11844  // We no longer have an empty Allocation.
11845  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11846  {
11847  m_HasEmptyBlock = false;
11848  }
11849  // Allocate from this pBlock.
11850  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11851  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11852  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11853  (*pAllocation)->InitBlockAllocation(
11854  pBestRequestBlock,
11855  bestRequest.offset,
11856  alignment,
11857  size,
11858  suballocType,
11859  mapped,
11860  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11861  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11862  VMA_DEBUG_LOG(" Returned from existing block");
11863  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11864  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11865  {
11866  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11867  }
11868  if(IsCorruptionDetectionEnabled())
11869  {
11870  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11871  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11872  }
11873  return VK_SUCCESS;
11874  }
11875  // else: Some allocations must have been touched while we are here. Next try.
11876  }
11877  else
11878  {
11879  // Could not find place in any of the blocks - break outer loop.
11880  break;
11881  }
11882  }
11883  /* Maximum number of tries exceeded - a very unlike event when many other
11884  threads are simultaneously touching allocations making it impossible to make
11885  lost at the same time as we try to allocate. */
11886  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11887  {
11888  return VK_ERROR_TOO_MANY_OBJECTS;
11889  }
11890  }
11891 
11892  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11893 }
11894 
11895 void VmaBlockVector::Free(
11896  VmaAllocation hAllocation)
11897 {
11898  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11899 
11900  // Scope for lock.
11901  {
11902  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11903 
11904  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11905 
11906  if(IsCorruptionDetectionEnabled())
11907  {
11908  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11909  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11910  }
11911 
11912  if(hAllocation->IsPersistentMap())
11913  {
11914  pBlock->Unmap(m_hAllocator, 1);
11915  }
11916 
11917  pBlock->m_pMetadata->Free(hAllocation);
11918  VMA_HEAVY_ASSERT(pBlock->Validate());
11919 
11920  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11921 
11922  // pBlock became empty after this deallocation.
11923  if(pBlock->m_pMetadata->IsEmpty())
11924  {
11925  // Already has empty Allocation. We don't want to have two, so delete this one.
11926  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11927  {
11928  pBlockToDelete = pBlock;
11929  Remove(pBlock);
11930  }
11931  // We now have first empty block.
11932  else
11933  {
11934  m_HasEmptyBlock = true;
11935  }
11936  }
11937  // pBlock didn't become empty, but we have another empty block - find and free that one.
11938  // (This is optional, heuristics.)
11939  else if(m_HasEmptyBlock)
11940  {
11941  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11942  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11943  {
11944  pBlockToDelete = pLastBlock;
11945  m_Blocks.pop_back();
11946  m_HasEmptyBlock = false;
11947  }
11948  }
11949 
11950  IncrementallySortBlocks();
11951  }
11952 
11953  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11954  // lock, for performance reason.
11955  if(pBlockToDelete != VMA_NULL)
11956  {
11957  VMA_DEBUG_LOG(" Deleted empty allocation");
11958  pBlockToDelete->Destroy(m_hAllocator);
11959  vma_delete(m_hAllocator, pBlockToDelete);
11960  }
11961 }
11962 
11963 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11964 {
11965  VkDeviceSize result = 0;
11966  for(size_t i = m_Blocks.size(); i--; )
11967  {
11968  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11969  if(result >= m_PreferredBlockSize)
11970  {
11971  break;
11972  }
11973  }
11974  return result;
11975 }
11976 
11977 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11978 {
11979  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11980  {
11981  if(m_Blocks[blockIndex] == pBlock)
11982  {
11983  VmaVectorRemove(m_Blocks, blockIndex);
11984  return;
11985  }
11986  }
11987  VMA_ASSERT(0);
11988 }
11989 
11990 void VmaBlockVector::IncrementallySortBlocks()
11991 {
11992  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11993  {
11994  // Bubble sort only until first swap.
11995  for(size_t i = 1; i < m_Blocks.size(); ++i)
11996  {
11997  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11998  {
11999  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12000  return;
12001  }
12002  }
12003  }
12004 }
12005 
12006 VkResult VmaBlockVector::AllocateFromBlock(
12007  VmaDeviceMemoryBlock* pBlock,
12008  uint32_t currentFrameIndex,
12009  VkDeviceSize size,
12010  VkDeviceSize alignment,
12011  VmaAllocationCreateFlags allocFlags,
12012  void* pUserData,
12013  VmaSuballocationType suballocType,
12014  uint32_t strategy,
12015  VmaAllocation* pAllocation)
12016 {
12017  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12018  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12019  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12020  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12021 
12022  VmaAllocationRequest currRequest = {};
12023  if(pBlock->m_pMetadata->CreateAllocationRequest(
12024  currentFrameIndex,
12025  m_FrameInUseCount,
12026  m_BufferImageGranularity,
12027  size,
12028  alignment,
12029  isUpperAddress,
12030  suballocType,
12031  false, // canMakeOtherLost
12032  strategy,
12033  &currRequest))
12034  {
12035  // Allocate from pCurrBlock.
12036  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12037 
12038  if(mapped)
12039  {
12040  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12041  if(res != VK_SUCCESS)
12042  {
12043  return res;
12044  }
12045  }
12046 
12047  // We no longer have an empty Allocation.
12048  if(pBlock->m_pMetadata->IsEmpty())
12049  {
12050  m_HasEmptyBlock = false;
12051  }
12052 
12053  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12054  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12055  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12056  (*pAllocation)->InitBlockAllocation(
12057  pBlock,
12058  currRequest.offset,
12059  alignment,
12060  size,
12061  suballocType,
12062  mapped,
12063  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12064  VMA_HEAVY_ASSERT(pBlock->Validate());
12065  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12066  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12067  {
12068  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12069  }
12070  if(IsCorruptionDetectionEnabled())
12071  {
12072  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12073  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12074  }
12075  return VK_SUCCESS;
12076  }
12077  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12078 }
12079 
12080 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12081 {
12082  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12083  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12084  allocInfo.allocationSize = blockSize;
12085  VkDeviceMemory mem = VK_NULL_HANDLE;
12086  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12087  if(res < 0)
12088  {
12089  return res;
12090  }
12091 
12092  // New VkDeviceMemory successfully created.
12093 
12094  // Create new Allocation for it.
12095  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12096  pBlock->Init(
12097  m_hAllocator,
12098  m_hParentPool,
12099  m_MemoryTypeIndex,
12100  mem,
12101  allocInfo.allocationSize,
12102  m_NextBlockId++,
12103  m_Algorithm);
12104 
12105  m_Blocks.push_back(pBlock);
12106  if(pNewBlockIndex != VMA_NULL)
12107  {
12108  *pNewBlockIndex = m_Blocks.size() - 1;
12109  }
12110 
12111  return VK_SUCCESS;
12112 }
12113 
12114 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12115  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12116  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12117 {
12118  const size_t blockCount = m_Blocks.size();
12119  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12120 
12121  enum BLOCK_FLAG
12122  {
12123  BLOCK_FLAG_USED = 0x00000001,
12124  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12125  };
12126 
12127  struct BlockInfo
12128  {
12129  uint32_t flags;
12130  void* pMappedData;
12131  };
12132  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12133  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12134  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12135 
12136  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12137  const size_t moveCount = moves.size();
12138  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12139  {
12140  const VmaDefragmentationMove& move = moves[moveIndex];
12141  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12142  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12143  }
12144 
12145  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12146 
12147  // Go over all blocks. Get mapped pointer or map if necessary.
12148  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12149  {
12150  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12151  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12152  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12153  {
12154  currBlockInfo.pMappedData = pBlock->GetMappedData();
12155  // It is not originally mapped - map it.
12156  if(currBlockInfo.pMappedData == VMA_NULL)
12157  {
12158  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12159  if(pDefragCtx->res == VK_SUCCESS)
12160  {
12161  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12162  }
12163  }
12164  }
12165  }
12166 
12167  // Go over all moves. Do actual data transfer.
12168  if(pDefragCtx->res == VK_SUCCESS)
12169  {
12170  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12171  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12172 
12173  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12174  {
12175  const VmaDefragmentationMove& move = moves[moveIndex];
12176 
12177  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12178  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12179 
12180  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12181 
12182  // Invalidate source.
12183  if(isNonCoherent)
12184  {
12185  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12186  memRange.memory = pSrcBlock->GetDeviceMemory();
12187  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12188  memRange.size = VMA_MIN(
12189  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12190  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12191  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12192  }
12193 
12194  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12195  memmove(
12196  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12197  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12198  static_cast<size_t>(move.size));
12199 
12200  if(IsCorruptionDetectionEnabled())
12201  {
12202  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12203  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12204  }
12205 
12206  // Flush destination.
12207  if(isNonCoherent)
12208  {
12209  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12210  memRange.memory = pDstBlock->GetDeviceMemory();
12211  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12212  memRange.size = VMA_MIN(
12213  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12214  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12215  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12216  }
12217  }
12218  }
12219 
12220  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12221  // Regardless of pCtx->res == VK_SUCCESS.
12222  for(size_t blockIndex = blockCount; blockIndex--; )
12223  {
12224  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12225  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12226  {
12227  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12228  pBlock->Unmap(m_hAllocator, 1);
12229  }
12230  }
12231 }
12232 
12233 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12234  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12235  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12236  VkCommandBuffer commandBuffer)
12237 {
12238  const size_t blockCount = m_Blocks.size();
12239 
12240  pDefragCtx->blockContexts.resize(blockCount);
12241  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12242 
12243  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12244  const size_t moveCount = moves.size();
12245  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12246  {
12247  const VmaDefragmentationMove& move = moves[moveIndex];
12248  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12249  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12250  }
12251 
12252  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12253 
12254  // Go over all blocks. Create and bind buffer for whole block if necessary.
12255  {
12256  VkBufferCreateInfo bufCreateInfo;
12257  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12258 
12259  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12260  {
12261  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12262  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12263  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12264  {
12265  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12266  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12267  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12268  if(pDefragCtx->res == VK_SUCCESS)
12269  {
12270  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12271  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12272  }
12273  }
12274  }
12275  }
12276 
12277  // Go over all moves. Post data transfer commands to command buffer.
12278  if(pDefragCtx->res == VK_SUCCESS)
12279  {
12280  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12281  {
12282  const VmaDefragmentationMove& move = moves[moveIndex];
12283 
12284  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12285  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12286 
12287  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12288 
12289  VkBufferCopy region = {
12290  move.srcOffset,
12291  move.dstOffset,
12292  move.size };
12293  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12294  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12295  }
12296  }
12297 
12298  // Save buffers to defrag context for later destruction.
12299  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12300  {
12301  pDefragCtx->res = VK_NOT_READY;
12302  }
12303 }
12304 
12305 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12306 {
12307  m_HasEmptyBlock = false;
12308  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12309  {
12310  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12311  if(pBlock->m_pMetadata->IsEmpty())
12312  {
12313  if(m_Blocks.size() > m_MinBlockCount)
12314  {
12315  if(pDefragmentationStats != VMA_NULL)
12316  {
12317  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12318  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12319  }
12320 
12321  VmaVectorRemove(m_Blocks, blockIndex);
12322  pBlock->Destroy(m_hAllocator);
12323  vma_delete(m_hAllocator, pBlock);
12324  }
12325  else
12326  {
12327  m_HasEmptyBlock = true;
12328  }
12329  }
12330  }
12331 }
12332 
12333 #if VMA_STATS_STRING_ENABLED
12334 
12335 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12336 {
12337  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12338 
12339  json.BeginObject();
12340 
12341  if(m_IsCustomPool)
12342  {
12343  json.WriteString("MemoryTypeIndex");
12344  json.WriteNumber(m_MemoryTypeIndex);
12345 
12346  json.WriteString("BlockSize");
12347  json.WriteNumber(m_PreferredBlockSize);
12348 
12349  json.WriteString("BlockCount");
12350  json.BeginObject(true);
12351  if(m_MinBlockCount > 0)
12352  {
12353  json.WriteString("Min");
12354  json.WriteNumber((uint64_t)m_MinBlockCount);
12355  }
12356  if(m_MaxBlockCount < SIZE_MAX)
12357  {
12358  json.WriteString("Max");
12359  json.WriteNumber((uint64_t)m_MaxBlockCount);
12360  }
12361  json.WriteString("Cur");
12362  json.WriteNumber((uint64_t)m_Blocks.size());
12363  json.EndObject();
12364 
12365  if(m_FrameInUseCount > 0)
12366  {
12367  json.WriteString("FrameInUseCount");
12368  json.WriteNumber(m_FrameInUseCount);
12369  }
12370 
12371  if(m_Algorithm != 0)
12372  {
12373  json.WriteString("Algorithm");
12374  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12375  }
12376  }
12377  else
12378  {
12379  json.WriteString("PreferredBlockSize");
12380  json.WriteNumber(m_PreferredBlockSize);
12381  }
12382 
12383  json.WriteString("Blocks");
12384  json.BeginObject();
12385  for(size_t i = 0; i < m_Blocks.size(); ++i)
12386  {
12387  json.BeginString();
12388  json.ContinueString(m_Blocks[i]->GetId());
12389  json.EndString();
12390 
12391  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12392  }
12393  json.EndObject();
12394 
12395  json.EndObject();
12396 }
12397 
12398 #endif // #if VMA_STATS_STRING_ENABLED
12399 
12400 void VmaBlockVector::Defragment(
12401  class VmaBlockVectorDefragmentationContext* pCtx,
12402  VmaDefragmentationStats* pStats,
12403  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12404  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12405  VkCommandBuffer commandBuffer)
12406 {
12407  pCtx->res = VK_SUCCESS;
12408 
12409  const VkMemoryPropertyFlags memPropFlags =
12410  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12411  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12412 
12413  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12414  isHostVisible;
12415  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12416  !IsCorruptionDetectionEnabled() &&
12417  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12418 
12419  // There are options to defragment this memory type.
12420  if(canDefragmentOnCpu || canDefragmentOnGpu)
12421  {
12422  bool defragmentOnGpu;
12423  // There is only one option to defragment this memory type.
12424  if(canDefragmentOnGpu != canDefragmentOnCpu)
12425  {
12426  defragmentOnGpu = canDefragmentOnGpu;
12427  }
12428  // Both options are available: Heuristics to choose the best one.
12429  else
12430  {
12431  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12432  m_hAllocator->IsIntegratedGpu();
12433  }
12434 
12435  bool overlappingMoveSupported = !defragmentOnGpu;
12436 
12437  if(m_hAllocator->m_UseMutex)
12438  {
12439  m_Mutex.LockWrite();
12440  pCtx->mutexLocked = true;
12441  }
12442 
12443  pCtx->Begin(overlappingMoveSupported);
12444 
12445  // Defragment.
12446 
12447  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12448  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12449  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12450  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12451  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12452 
12453  // Accumulate statistics.
12454  if(pStats != VMA_NULL)
12455  {
12456  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12457  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12458  pStats->bytesMoved += bytesMoved;
12459  pStats->allocationsMoved += allocationsMoved;
12460  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12461  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12462  if(defragmentOnGpu)
12463  {
12464  maxGpuBytesToMove -= bytesMoved;
12465  maxGpuAllocationsToMove -= allocationsMoved;
12466  }
12467  else
12468  {
12469  maxCpuBytesToMove -= bytesMoved;
12470  maxCpuAllocationsToMove -= allocationsMoved;
12471  }
12472  }
12473 
12474  if(pCtx->res >= VK_SUCCESS)
12475  {
12476  if(defragmentOnGpu)
12477  {
12478  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12479  }
12480  else
12481  {
12482  ApplyDefragmentationMovesCpu(pCtx, moves);
12483  }
12484  }
12485  }
12486 }
12487 
12488 void VmaBlockVector::DefragmentationEnd(
12489  class VmaBlockVectorDefragmentationContext* pCtx,
12490  VmaDefragmentationStats* pStats)
12491 {
12492  // Destroy buffers.
12493  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12494  {
12495  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12496  if(blockCtx.hBuffer)
12497  {
12498  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12499  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12500  }
12501  }
12502 
12503  if(pCtx->res >= VK_SUCCESS)
12504  {
12505  FreeEmptyBlocks(pStats);
12506  }
12507 
12508  if(pCtx->mutexLocked)
12509  {
12510  VMA_ASSERT(m_hAllocator->m_UseMutex);
12511  m_Mutex.UnlockWrite();
12512  }
12513 }
12514 
12515 size_t VmaBlockVector::CalcAllocationCount() const
12516 {
12517  size_t result = 0;
12518  for(size_t i = 0; i < m_Blocks.size(); ++i)
12519  {
12520  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12521  }
12522  return result;
12523 }
12524 
12525 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12526 {
12527  if(m_BufferImageGranularity == 1)
12528  {
12529  return false;
12530  }
12531  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12532  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12533  {
12534  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12535  VMA_ASSERT(m_Algorithm == 0);
12536  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12537  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12538  {
12539  return true;
12540  }
12541  }
12542  return false;
12543 }
12544 
12545 void VmaBlockVector::MakePoolAllocationsLost(
12546  uint32_t currentFrameIndex,
12547  size_t* pLostAllocationCount)
12548 {
12549  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12550  size_t lostAllocationCount = 0;
12551  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12552  {
12553  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12554  VMA_ASSERT(pBlock);
12555  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12556  }
12557  if(pLostAllocationCount != VMA_NULL)
12558  {
12559  *pLostAllocationCount = lostAllocationCount;
12560  }
12561 }
12562 
12563 VkResult VmaBlockVector::CheckCorruption()
12564 {
12565  if(!IsCorruptionDetectionEnabled())
12566  {
12567  return VK_ERROR_FEATURE_NOT_PRESENT;
12568  }
12569 
12570  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12571  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12572  {
12573  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12574  VMA_ASSERT(pBlock);
12575  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12576  if(res != VK_SUCCESS)
12577  {
12578  return res;
12579  }
12580  }
12581  return VK_SUCCESS;
12582 }
12583 
12584 void VmaBlockVector::AddStats(VmaStats* pStats)
12585 {
12586  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12587  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12588 
12589  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12590 
12591  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12592  {
12593  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12594  VMA_ASSERT(pBlock);
12595  VMA_HEAVY_ASSERT(pBlock->Validate());
12596  VmaStatInfo allocationStatInfo;
12597  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12598  VmaAddStatInfo(pStats->total, allocationStatInfo);
12599  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12600  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12601  }
12602 }
12603 
12605 // VmaDefragmentationAlgorithm_Generic members definition
12606 
12607 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12608  VmaAllocator hAllocator,
12609  VmaBlockVector* pBlockVector,
12610  uint32_t currentFrameIndex,
12611  bool overlappingMoveSupported) :
12612  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12613  m_AllocationCount(0),
12614  m_AllAllocations(false),
12615  m_BytesMoved(0),
12616  m_AllocationsMoved(0),
12617  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12618 {
12619  // Create block info for each block.
12620  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12621  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12622  {
12623  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12624  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12625  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12626  m_Blocks.push_back(pBlockInfo);
12627  }
12628 
12629  // Sort them by m_pBlock pointer value.
12630  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12631 }
12632 
12633 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12634 {
12635  for(size_t i = m_Blocks.size(); i--; )
12636  {
12637  vma_delete(m_hAllocator, m_Blocks[i]);
12638  }
12639 }
12640 
12641 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12642 {
12643  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
12644  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12645  {
12646  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12647  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12648  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12649  {
12650  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12651  (*it)->m_Allocations.push_back(allocInfo);
12652  }
12653  else
12654  {
12655  VMA_ASSERT(0);
12656  }
12657 
12658  ++m_AllocationCount;
12659  }
12660 }
12661 
12662 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12663  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12664  VkDeviceSize maxBytesToMove,
12665  uint32_t maxAllocationsToMove)
12666 {
12667  if(m_Blocks.empty())
12668  {
12669  return VK_SUCCESS;
12670  }
12671 
12672  // This is a choice based on research.
12673  // Option 1:
12674  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12675  // Option 2:
12676  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12677  // Option 3:
12678  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12679 
12680  size_t srcBlockMinIndex = 0;
12681  // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
12682  /*
12683  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12684  {
12685  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12686  if(blocksWithNonMovableCount > 0)
12687  {
12688  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12689  }
12690  }
12691  */
12692 
12693  size_t srcBlockIndex = m_Blocks.size() - 1;
12694  size_t srcAllocIndex = SIZE_MAX;
12695  for(;;)
12696  {
12697  // 1. Find next allocation to move.
12698  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12699  // 1.2. Then start from last to first m_Allocations.
12700  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12701  {
12702  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12703  {
12704  // Finished: no more allocations to process.
12705  if(srcBlockIndex == srcBlockMinIndex)
12706  {
12707  return VK_SUCCESS;
12708  }
12709  else
12710  {
12711  --srcBlockIndex;
12712  srcAllocIndex = SIZE_MAX;
12713  }
12714  }
12715  else
12716  {
12717  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12718  }
12719  }
12720 
12721  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12722  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12723 
12724  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12725  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12726  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12727  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12728 
12729  // 2. Try to find new place for this allocation in preceding or current block.
12730  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12731  {
12732  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12733  VmaAllocationRequest dstAllocRequest;
12734  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12735  m_CurrentFrameIndex,
12736  m_pBlockVector->GetFrameInUseCount(),
12737  m_pBlockVector->GetBufferImageGranularity(),
12738  size,
12739  alignment,
12740  false, // upperAddress
12741  suballocType,
12742  false, // canMakeOtherLost
12743  strategy,
12744  &dstAllocRequest) &&
12745  MoveMakesSense(
12746  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12747  {
12748  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12749 
12750  // Reached limit on number of allocations or bytes to move.
12751  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12752  (m_BytesMoved + size > maxBytesToMove))
12753  {
12754  return VK_SUCCESS;
12755  }
12756 
12757  VmaDefragmentationMove move;
12758  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12759  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12760  move.srcOffset = srcOffset;
12761  move.dstOffset = dstAllocRequest.offset;
12762  move.size = size;
12763  moves.push_back(move);
12764 
12765  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12766  dstAllocRequest,
12767  suballocType,
12768  size,
12769  allocInfo.m_hAllocation);
12770  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12771 
12772  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12773 
12774  if(allocInfo.m_pChanged != VMA_NULL)
12775  {
12776  *allocInfo.m_pChanged = VK_TRUE;
12777  }
12778 
12779  ++m_AllocationsMoved;
12780  m_BytesMoved += size;
12781 
12782  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12783 
12784  break;
12785  }
12786  }
12787 
12788  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
12789 
12790  if(srcAllocIndex > 0)
12791  {
12792  --srcAllocIndex;
12793  }
12794  else
12795  {
12796  if(srcBlockIndex > 0)
12797  {
12798  --srcBlockIndex;
12799  srcAllocIndex = SIZE_MAX;
12800  }
12801  else
12802  {
12803  return VK_SUCCESS;
12804  }
12805  }
12806  }
12807 }
12808 
12809 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12810 {
12811  size_t result = 0;
12812  for(size_t i = 0; i < m_Blocks.size(); ++i)
12813  {
12814  if(m_Blocks[i]->m_HasNonMovableAllocations)
12815  {
12816  ++result;
12817  }
12818  }
12819  return result;
12820 }
12821 
12822 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12823  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12824  VkDeviceSize maxBytesToMove,
12825  uint32_t maxAllocationsToMove)
12826 {
12827  if(!m_AllAllocations && m_AllocationCount == 0)
12828  {
12829  return VK_SUCCESS;
12830  }
12831 
12832  const size_t blockCount = m_Blocks.size();
12833  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12834  {
12835  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12836 
12837  if(m_AllAllocations)
12838  {
12839  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12840  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12841  it != pMetadata->m_Suballocations.end();
12842  ++it)
12843  {
12844  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12845  {
12846  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12847  pBlockInfo->m_Allocations.push_back(allocInfo);
12848  }
12849  }
12850  }
12851 
12852  pBlockInfo->CalcHasNonMovableAllocations();
12853 
12854  // This is a choice based on research.
12855  // Option 1:
12856  pBlockInfo->SortAllocationsByOffsetDescending();
12857  // Option 2:
12858  //pBlockInfo->SortAllocationsBySizeDescending();
12859  }
12860 
12861  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
12862  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12863 
12864  // This is a choice based on research.
12865  const uint32_t roundCount = 2;
12866 
12867  // Execute defragmentation rounds (the main part).
12868  VkResult result = VK_SUCCESS;
12869  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12870  {
12871  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12872  }
12873 
12874  return result;
12875 }
12876 
12877 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12878  size_t dstBlockIndex, VkDeviceSize dstOffset,
12879  size_t srcBlockIndex, VkDeviceSize srcOffset)
12880 {
12881  if(dstBlockIndex < srcBlockIndex)
12882  {
12883  return true;
12884  }
12885  if(dstBlockIndex > srcBlockIndex)
12886  {
12887  return false;
12888  }
12889  if(dstOffset < srcOffset)
12890  {
12891  return true;
12892  }
12893  return false;
12894 }
12895 
12897 // VmaDefragmentationAlgorithm_Fast
12898 
12899 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12900  VmaAllocator hAllocator,
12901  VmaBlockVector* pBlockVector,
12902  uint32_t currentFrameIndex,
12903  bool overlappingMoveSupported) :
12904  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12905  m_OverlappingMoveSupported(overlappingMoveSupported),
12906  m_AllocationCount(0),
12907  m_AllAllocations(false),
12908  m_BytesMoved(0),
12909  m_AllocationsMoved(0),
12910  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12911 {
12912  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12913 
12914 }
12915 
12916 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12917 {
12918 }
12919 
12920 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12921  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12922  VkDeviceSize maxBytesToMove,
12923  uint32_t maxAllocationsToMove)
12924 {
12925  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12926 
12927  const size_t blockCount = m_pBlockVector->GetBlockCount();
12928  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12929  {
12930  return VK_SUCCESS;
12931  }
12932 
12933  PreprocessMetadata();
12934 
12935  // Sort blocks in order from most destination.
12936 
12937  m_BlockInfos.resize(blockCount);
12938  for(size_t i = 0; i < blockCount; ++i)
12939  {
12940  m_BlockInfos[i].origBlockIndex = i;
12941  }
12942 
12943  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12944  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12945  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12946  });
12947 
12948  // THE MAIN ALGORITHM
12949 
12950  FreeSpaceDatabase freeSpaceDb;
12951 
12952  size_t dstBlockInfoIndex = 0;
12953  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12954  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12955  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12956  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12957  VkDeviceSize dstOffset = 0;
12958 
12959  bool end = false;
12960  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12961  {
12962  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12963  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12964  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12965  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12966  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12967  {
12968  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12969  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12970  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12971  if(m_AllocationsMoved == maxAllocationsToMove ||
12972  m_BytesMoved + srcAllocSize > maxBytesToMove)
12973  {
12974  end = true;
12975  break;
12976  }
12977  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12978 
12979  // Try to place it in one of free spaces from the database.
12980  size_t freeSpaceInfoIndex;
12981  VkDeviceSize dstAllocOffset;
12982  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12983  freeSpaceInfoIndex, dstAllocOffset))
12984  {
12985  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12986  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12987  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12988 
12989  // Same block
12990  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12991  {
12992  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12993 
12994  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12995 
12996  VmaSuballocation suballoc = *srcSuballocIt;
12997  suballoc.offset = dstAllocOffset;
12998  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12999  m_BytesMoved += srcAllocSize;
13000  ++m_AllocationsMoved;
13001 
13002  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13003  ++nextSuballocIt;
13004  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13005  srcSuballocIt = nextSuballocIt;
13006 
13007  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13008 
13009  VmaDefragmentationMove move = {
13010  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13011  srcAllocOffset, dstAllocOffset,
13012  srcAllocSize };
13013  moves.push_back(move);
13014  }
13015  // Different block
13016  else
13017  {
13018  // MOVE OPTION 2: Move the allocation to a different block.
13019 
13020  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13021 
13022  VmaSuballocation suballoc = *srcSuballocIt;
13023  suballoc.offset = dstAllocOffset;
13024  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13025  m_BytesMoved += srcAllocSize;
13026  ++m_AllocationsMoved;
13027 
13028  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13029  ++nextSuballocIt;
13030  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13031  srcSuballocIt = nextSuballocIt;
13032 
13033  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13034 
13035  VmaDefragmentationMove move = {
13036  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13037  srcAllocOffset, dstAllocOffset,
13038  srcAllocSize };
13039  moves.push_back(move);
13040  }
13041  }
13042  else
13043  {
13044  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13045 
13046  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
13047  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13048  dstAllocOffset + srcAllocSize > dstBlockSize)
13049  {
13050  // But before that, register remaining free space at the end of dst block.
13051  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13052 
13053  ++dstBlockInfoIndex;
13054  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13055  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13056  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13057  dstBlockSize = pDstMetadata->GetSize();
13058  dstOffset = 0;
13059  dstAllocOffset = 0;
13060  }
13061 
13062  // Same block
13063  if(dstBlockInfoIndex == srcBlockInfoIndex)
13064  {
13065  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13066 
13067  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13068 
13069  bool skipOver = overlap;
13070  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13071  {
13072  // If destination and source place overlap, skip if it would move it
13073  // by only < 1/64 of its size.
13074  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13075  }
13076 
13077  if(skipOver)
13078  {
13079  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13080 
13081  dstOffset = srcAllocOffset + srcAllocSize;
13082  ++srcSuballocIt;
13083  }
13084  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13085  else
13086  {
13087  srcSuballocIt->offset = dstAllocOffset;
13088  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13089  dstOffset = dstAllocOffset + srcAllocSize;
13090  m_BytesMoved += srcAllocSize;
13091  ++m_AllocationsMoved;
13092  ++srcSuballocIt;
13093  VmaDefragmentationMove move = {
13094  srcOrigBlockIndex, dstOrigBlockIndex,
13095  srcAllocOffset, dstAllocOffset,
13096  srcAllocSize };
13097  moves.push_back(move);
13098  }
13099  }
13100  // Different block
13101  else
13102  {
13103  // MOVE OPTION 2: Move the allocation to a different block.
13104 
13105  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13106  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13107 
13108  VmaSuballocation suballoc = *srcSuballocIt;
13109  suballoc.offset = dstAllocOffset;
13110  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13111  dstOffset = dstAllocOffset + srcAllocSize;
13112  m_BytesMoved += srcAllocSize;
13113  ++m_AllocationsMoved;
13114 
13115  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13116  ++nextSuballocIt;
13117  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13118  srcSuballocIt = nextSuballocIt;
13119 
13120  pDstMetadata->m_Suballocations.push_back(suballoc);
13121 
13122  VmaDefragmentationMove move = {
13123  srcOrigBlockIndex, dstOrigBlockIndex,
13124  srcAllocOffset, dstAllocOffset,
13125  srcAllocSize };
13126  moves.push_back(move);
13127  }
13128  }
13129  }
13130  }
13131 
13132  m_BlockInfos.clear();
13133 
13134  PostprocessMetadata();
13135 
13136  return VK_SUCCESS;
13137 }
13138 
13139 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13140 {
13141  const size_t blockCount = m_pBlockVector->GetBlockCount();
13142  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13143  {
13144  VmaBlockMetadata_Generic* const pMetadata =
13145  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13146  pMetadata->m_FreeCount = 0;
13147  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13148  pMetadata->m_FreeSuballocationsBySize.clear();
13149  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13150  it != pMetadata->m_Suballocations.end(); )
13151  {
13152  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13153  {
13154  VmaSuballocationList::iterator nextIt = it;
13155  ++nextIt;
13156  pMetadata->m_Suballocations.erase(it);
13157  it = nextIt;
13158  }
13159  else
13160  {
13161  ++it;
13162  }
13163  }
13164  }
13165 }
13166 
13167 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13168 {
13169  const size_t blockCount = m_pBlockVector->GetBlockCount();
13170  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13171  {
13172  VmaBlockMetadata_Generic* const pMetadata =
13173  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13174  const VkDeviceSize blockSize = pMetadata->GetSize();
13175 
13176  // No allocations in this block - entire area is free.
13177  if(pMetadata->m_Suballocations.empty())
13178  {
13179  pMetadata->m_FreeCount = 1;
13180  //pMetadata->m_SumFreeSize is already set to blockSize.
13181  VmaSuballocation suballoc = {
13182  0, // offset
13183  blockSize, // size
13184  VMA_NULL, // hAllocation
13185  VMA_SUBALLOCATION_TYPE_FREE };
13186  pMetadata->m_Suballocations.push_back(suballoc);
13187  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13188  }
13189  // There are some allocations in this block.
13190  else
13191  {
13192  VkDeviceSize offset = 0;
13193  VmaSuballocationList::iterator it;
13194  for(it = pMetadata->m_Suballocations.begin();
13195  it != pMetadata->m_Suballocations.end();
13196  ++it)
13197  {
13198  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13199  VMA_ASSERT(it->offset >= offset);
13200 
13201  // Need to insert preceding free space.
13202  if(it->offset > offset)
13203  {
13204  ++pMetadata->m_FreeCount;
13205  const VkDeviceSize freeSize = it->offset - offset;
13206  VmaSuballocation suballoc = {
13207  offset, // offset
13208  freeSize, // size
13209  VMA_NULL, // hAllocation
13210  VMA_SUBALLOCATION_TYPE_FREE };
13211  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13212  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13213  {
13214  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13215  }
13216  }
13217 
13218  pMetadata->m_SumFreeSize -= it->size;
13219  offset = it->offset + it->size;
13220  }
13221 
13222  // Need to insert trailing free space.
13223  if(offset < blockSize)
13224  {
13225  ++pMetadata->m_FreeCount;
13226  const VkDeviceSize freeSize = blockSize - offset;
13227  VmaSuballocation suballoc = {
13228  offset, // offset
13229  freeSize, // size
13230  VMA_NULL, // hAllocation
13231  VMA_SUBALLOCATION_TYPE_FREE };
13232  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13233  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13234  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13235  {
13236  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13237  }
13238  }
13239 
13240  VMA_SORT(
13241  pMetadata->m_FreeSuballocationsBySize.begin(),
13242  pMetadata->m_FreeSuballocationsBySize.end(),
13243  VmaSuballocationItemSizeLess());
13244  }
13245 
13246  VMA_HEAVY_ASSERT(pMetadata->Validate());
13247  }
13248 }
13249 
13250 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13251 {
13252  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13253  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13254  while(it != pMetadata->m_Suballocations.end())
13255  {
13256  if(it->offset < suballoc.offset)
13257  {
13258  ++it;
13259  }
13260  }
13261  pMetadata->m_Suballocations.insert(it, suballoc);
13262 }
13263 
13265 // VmaBlockVectorDefragmentationContext
13266 
13267 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13268  VmaAllocator hAllocator,
13269  VmaPool hCustomPool,
13270  VmaBlockVector* pBlockVector,
13271  uint32_t currFrameIndex) :
13272  res(VK_SUCCESS),
13273  mutexLocked(false),
13274  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13275  m_hAllocator(hAllocator),
13276  m_hCustomPool(hCustomPool),
13277  m_pBlockVector(pBlockVector),
13278  m_CurrFrameIndex(currFrameIndex),
13279  m_pAlgorithm(VMA_NULL),
13280  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13281  m_AllAllocations(false)
13282 {
13283 }
13284 
13285 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13286 {
13287  vma_delete(m_hAllocator, m_pAlgorithm);
13288 }
13289 
13290 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13291 {
13292  AllocInfo info = { hAlloc, pChanged };
13293  m_Allocations.push_back(info);
13294 }
13295 
13296 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13297 {
13298  const bool allAllocations = m_AllAllocations ||
13299  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13300 
13301  /********************************
13302  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13303  ********************************/
13304 
13305  /*
13306  Fast algorithm is supported only when certain criteria are met:
13307  - VMA_DEBUG_MARGIN is 0.
13308  - All allocations in this block vector are moveable.
13309  - There is no possibility of image/buffer granularity conflict.
13310  */
13311  if(VMA_DEBUG_MARGIN == 0 &&
13312  allAllocations &&
13313  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13314  {
13315  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13316  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13317  }
13318  else
13319  {
13320  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13321  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13322  }
13323 
13324  if(allAllocations)
13325  {
13326  m_pAlgorithm->AddAll();
13327  }
13328  else
13329  {
13330  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13331  {
13332  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13333  }
13334  }
13335 }
13336 
13338 // VmaDefragmentationContext
13339 
13340 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13341  VmaAllocator hAllocator,
13342  uint32_t currFrameIndex,
13343  uint32_t flags,
13344  VmaDefragmentationStats* pStats) :
13345  m_hAllocator(hAllocator),
13346  m_CurrFrameIndex(currFrameIndex),
13347  m_Flags(flags),
13348  m_pStats(pStats),
13349  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13350 {
13351  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13352 }
13353 
13354 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13355 {
13356  for(size_t i = m_CustomPoolContexts.size(); i--; )
13357  {
13358  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13359  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13360  vma_delete(m_hAllocator, pBlockVectorCtx);
13361  }
13362  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13363  {
13364  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13365  if(pBlockVectorCtx)
13366  {
13367  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13368  vma_delete(m_hAllocator, pBlockVectorCtx);
13369  }
13370  }
13371 }
13372 
13373 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13374 {
13375  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13376  {
13377  VmaPool pool = pPools[poolIndex];
13378  VMA_ASSERT(pool);
13379  // Pools with algorithm other than default are not defragmented.
13380  if(pool->m_BlockVector.GetAlgorithm() == 0)
13381  {
13382  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13383 
13384  for(size_t i = m_CustomPoolContexts.size(); i--; )
13385  {
13386  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13387  {
13388  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13389  break;
13390  }
13391  }
13392 
13393  if(!pBlockVectorDefragCtx)
13394  {
13395  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13396  m_hAllocator,
13397  pool,
13398  &pool->m_BlockVector,
13399  m_CurrFrameIndex);
13400  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13401  }
13402 
13403  pBlockVectorDefragCtx->AddAll();
13404  }
13405  }
13406 }
13407 
13408 void VmaDefragmentationContext_T::AddAllocations(
13409  uint32_t allocationCount,
13410  VmaAllocation* pAllocations,
13411  VkBool32* pAllocationsChanged)
13412 {
13413  // Dispatch pAllocations among defragmentators. Create them when necessary.
13414  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13415  {
13416  const VmaAllocation hAlloc = pAllocations[allocIndex];
13417  VMA_ASSERT(hAlloc);
13418  // DedicatedAlloc cannot be defragmented.
13419  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13420  // Lost allocation cannot be defragmented.
13421  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13422  {
13423  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13424 
13425  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13426  // This allocation belongs to custom pool.
13427  if(hAllocPool != VK_NULL_HANDLE)
13428  {
13429  // Pools with algorithm other than default are not defragmented.
13430  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13431  {
13432  for(size_t i = m_CustomPoolContexts.size(); i--; )
13433  {
13434  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13435  {
13436  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13437  break;
13438  }
13439  }
13440  if(!pBlockVectorDefragCtx)
13441  {
13442  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13443  m_hAllocator,
13444  hAllocPool,
13445  &hAllocPool->m_BlockVector,
13446  m_CurrFrameIndex);
13447  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13448  }
13449  }
13450  }
13451  // This allocation belongs to default pool.
13452  else
13453  {
13454  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13455  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13456  if(!pBlockVectorDefragCtx)
13457  {
13458  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13459  m_hAllocator,
13460  VMA_NULL, // hCustomPool
13461  m_hAllocator->m_pBlockVectors[memTypeIndex],
13462  m_CurrFrameIndex);
13463  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13464  }
13465  }
13466 
13467  if(pBlockVectorDefragCtx)
13468  {
13469  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13470  &pAllocationsChanged[allocIndex] : VMA_NULL;
13471  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13472  }
13473  }
13474  }
13475 }
13476 
13477 VkResult VmaDefragmentationContext_T::Defragment(
13478  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13479  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13480  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13481 {
13482  if(pStats)
13483  {
13484  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13485  }
13486 
13487  if(commandBuffer == VK_NULL_HANDLE)
13488  {
13489  maxGpuBytesToMove = 0;
13490  maxGpuAllocationsToMove = 0;
13491  }
13492 
13493  VkResult res = VK_SUCCESS;
13494 
13495  // Process default pools.
13496  for(uint32_t memTypeIndex = 0;
13497  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13498  ++memTypeIndex)
13499  {
13500  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13501  if(pBlockVectorCtx)
13502  {
13503  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13504  pBlockVectorCtx->GetBlockVector()->Defragment(
13505  pBlockVectorCtx,
13506  pStats,
13507  maxCpuBytesToMove, maxCpuAllocationsToMove,
13508  maxGpuBytesToMove, maxGpuAllocationsToMove,
13509  commandBuffer);
13510  if(pBlockVectorCtx->res != VK_SUCCESS)
13511  {
13512  res = pBlockVectorCtx->res;
13513  }
13514  }
13515  }
13516 
13517  // Process custom pools.
13518  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13519  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13520  ++customCtxIndex)
13521  {
13522  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13523  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13524  pBlockVectorCtx->GetBlockVector()->Defragment(
13525  pBlockVectorCtx,
13526  pStats,
13527  maxCpuBytesToMove, maxCpuAllocationsToMove,
13528  maxGpuBytesToMove, maxGpuAllocationsToMove,
13529  commandBuffer);
13530  if(pBlockVectorCtx->res != VK_SUCCESS)
13531  {
13532  res = pBlockVectorCtx->res;
13533  }
13534  }
13535 
13536  return res;
13537 }
13538 
13540 // VmaRecorder
13541 
13542 #if VMA_RECORDING_ENABLED
13543 
13544 VmaRecorder::VmaRecorder() :
13545  m_UseMutex(true),
13546  m_Flags(0),
13547  m_File(VMA_NULL),
13548  m_Freq(INT64_MAX),
13549  m_StartCounter(INT64_MAX)
13550 {
13551 }
13552 
13553 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13554 {
13555  m_UseMutex = useMutex;
13556  m_Flags = settings.flags;
13557 
13558  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13559  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13560 
13561  // Open file for writing.
13562  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13563  if(err != 0)
13564  {
13565  return VK_ERROR_INITIALIZATION_FAILED;
13566  }
13567 
13568  // Write header.
13569  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13570  fprintf(m_File, "%s\n", "1,6");
13571 
13572  return VK_SUCCESS;
13573 }
13574 
13575 VmaRecorder::~VmaRecorder()
13576 {
13577  if(m_File != VMA_NULL)
13578  {
13579  fclose(m_File);
13580  }
13581 }
13582 
13583 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13584 {
13585  CallParams callParams;
13586  GetBasicParams(callParams);
13587 
13588  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13589  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13590  Flush();
13591 }
13592 
13593 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13594 {
13595  CallParams callParams;
13596  GetBasicParams(callParams);
13597 
13598  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13599  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13600  Flush();
13601 }
13602 
13603 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13604 {
13605  CallParams callParams;
13606  GetBasicParams(callParams);
13607 
13608  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13609  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13610  createInfo.memoryTypeIndex,
13611  createInfo.flags,
13612  createInfo.blockSize,
13613  (uint64_t)createInfo.minBlockCount,
13614  (uint64_t)createInfo.maxBlockCount,
13615  createInfo.frameInUseCount,
13616  pool);
13617  Flush();
13618 }
13619 
13620 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13621 {
13622  CallParams callParams;
13623  GetBasicParams(callParams);
13624 
13625  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13626  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13627  pool);
13628  Flush();
13629 }
13630 
13631 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13632  const VkMemoryRequirements& vkMemReq,
13633  const VmaAllocationCreateInfo& createInfo,
13634  VmaAllocation allocation)
13635 {
13636  CallParams callParams;
13637  GetBasicParams(callParams);
13638 
13639  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13640  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13641  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13642  vkMemReq.size,
13643  vkMemReq.alignment,
13644  vkMemReq.memoryTypeBits,
13645  createInfo.flags,
13646  createInfo.usage,
13647  createInfo.requiredFlags,
13648  createInfo.preferredFlags,
13649  createInfo.memoryTypeBits,
13650  createInfo.pool,
13651  allocation,
13652  userDataStr.GetString());
13653  Flush();
13654 }
13655 
13656 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13657  const VkMemoryRequirements& vkMemReq,
13658  const VmaAllocationCreateInfo& createInfo,
13659  uint64_t allocationCount,
13660  const VmaAllocation* pAllocations)
13661 {
13662  CallParams callParams;
13663  GetBasicParams(callParams);
13664 
13665  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13666  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13667  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13668  vkMemReq.size,
13669  vkMemReq.alignment,
13670  vkMemReq.memoryTypeBits,
13671  createInfo.flags,
13672  createInfo.usage,
13673  createInfo.requiredFlags,
13674  createInfo.preferredFlags,
13675  createInfo.memoryTypeBits,
13676  createInfo.pool);
13677  PrintPointerList(allocationCount, pAllocations);
13678  fprintf(m_File, ",%s\n", userDataStr.GetString());
13679  Flush();
13680 }
13681 
13682 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13683  const VkMemoryRequirements& vkMemReq,
13684  bool requiresDedicatedAllocation,
13685  bool prefersDedicatedAllocation,
13686  const VmaAllocationCreateInfo& createInfo,
13687  VmaAllocation allocation)
13688 {
13689  CallParams callParams;
13690  GetBasicParams(callParams);
13691 
13692  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13693  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13694  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13695  vkMemReq.size,
13696  vkMemReq.alignment,
13697  vkMemReq.memoryTypeBits,
13698  requiresDedicatedAllocation ? 1 : 0,
13699  prefersDedicatedAllocation ? 1 : 0,
13700  createInfo.flags,
13701  createInfo.usage,
13702  createInfo.requiredFlags,
13703  createInfo.preferredFlags,
13704  createInfo.memoryTypeBits,
13705  createInfo.pool,
13706  allocation,
13707  userDataStr.GetString());
13708  Flush();
13709 }
13710 
13711 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13712  const VkMemoryRequirements& vkMemReq,
13713  bool requiresDedicatedAllocation,
13714  bool prefersDedicatedAllocation,
13715  const VmaAllocationCreateInfo& createInfo,
13716  VmaAllocation allocation)
13717 {
13718  CallParams callParams;
13719  GetBasicParams(callParams);
13720 
13721  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13722  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13723  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13724  vkMemReq.size,
13725  vkMemReq.alignment,
13726  vkMemReq.memoryTypeBits,
13727  requiresDedicatedAllocation ? 1 : 0,
13728  prefersDedicatedAllocation ? 1 : 0,
13729  createInfo.flags,
13730  createInfo.usage,
13731  createInfo.requiredFlags,
13732  createInfo.preferredFlags,
13733  createInfo.memoryTypeBits,
13734  createInfo.pool,
13735  allocation,
13736  userDataStr.GetString());
13737  Flush();
13738 }
13739 
13740 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13741  VmaAllocation allocation)
13742 {
13743  CallParams callParams;
13744  GetBasicParams(callParams);
13745 
13746  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13747  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13748  allocation);
13749  Flush();
13750 }
13751 
13752 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13753  uint64_t allocationCount,
13754  const VmaAllocation* pAllocations)
13755 {
13756  CallParams callParams;
13757  GetBasicParams(callParams);
13758 
13759  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13760  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13761  PrintPointerList(allocationCount, pAllocations);
13762  fprintf(m_File, "\n");
13763  Flush();
13764 }
13765 
13766 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13767  VmaAllocation allocation,
13768  const void* pUserData)
13769 {
13770  CallParams callParams;
13771  GetBasicParams(callParams);
13772 
13773  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13774  UserDataString userDataStr(
13775  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13776  pUserData);
13777  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13778  allocation,
13779  userDataStr.GetString());
13780  Flush();
13781 }
13782 
13783 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13784  VmaAllocation allocation)
13785 {
13786  CallParams callParams;
13787  GetBasicParams(callParams);
13788 
13789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13790  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13791  allocation);
13792  Flush();
13793 }
13794 
13795 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13796  VmaAllocation allocation)
13797 {
13798  CallParams callParams;
13799  GetBasicParams(callParams);
13800 
13801  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13802  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13803  allocation);
13804  Flush();
13805 }
13806 
13807 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13808  VmaAllocation allocation)
13809 {
13810  CallParams callParams;
13811  GetBasicParams(callParams);
13812 
13813  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13814  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13815  allocation);
13816  Flush();
13817 }
13818 
13819 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13820  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13821 {
13822  CallParams callParams;
13823  GetBasicParams(callParams);
13824 
13825  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13826  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13827  allocation,
13828  offset,
13829  size);
13830  Flush();
13831 }
13832 
13833 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13834  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13835 {
13836  CallParams callParams;
13837  GetBasicParams(callParams);
13838 
13839  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13840  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13841  allocation,
13842  offset,
13843  size);
13844  Flush();
13845 }
13846 
13847 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13848  const VkBufferCreateInfo& bufCreateInfo,
13849  const VmaAllocationCreateInfo& allocCreateInfo,
13850  VmaAllocation allocation)
13851 {
13852  CallParams callParams;
13853  GetBasicParams(callParams);
13854 
13855  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13856  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13857  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13858  bufCreateInfo.flags,
13859  bufCreateInfo.size,
13860  bufCreateInfo.usage,
13861  bufCreateInfo.sharingMode,
13862  allocCreateInfo.flags,
13863  allocCreateInfo.usage,
13864  allocCreateInfo.requiredFlags,
13865  allocCreateInfo.preferredFlags,
13866  allocCreateInfo.memoryTypeBits,
13867  allocCreateInfo.pool,
13868  allocation,
13869  userDataStr.GetString());
13870  Flush();
13871 }
13872 
13873 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13874  const VkImageCreateInfo& imageCreateInfo,
13875  const VmaAllocationCreateInfo& allocCreateInfo,
13876  VmaAllocation allocation)
13877 {
13878  CallParams callParams;
13879  GetBasicParams(callParams);
13880 
13881  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13882  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13883  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13884  imageCreateInfo.flags,
13885  imageCreateInfo.imageType,
13886  imageCreateInfo.format,
13887  imageCreateInfo.extent.width,
13888  imageCreateInfo.extent.height,
13889  imageCreateInfo.extent.depth,
13890  imageCreateInfo.mipLevels,
13891  imageCreateInfo.arrayLayers,
13892  imageCreateInfo.samples,
13893  imageCreateInfo.tiling,
13894  imageCreateInfo.usage,
13895  imageCreateInfo.sharingMode,
13896  imageCreateInfo.initialLayout,
13897  allocCreateInfo.flags,
13898  allocCreateInfo.usage,
13899  allocCreateInfo.requiredFlags,
13900  allocCreateInfo.preferredFlags,
13901  allocCreateInfo.memoryTypeBits,
13902  allocCreateInfo.pool,
13903  allocation,
13904  userDataStr.GetString());
13905  Flush();
13906 }
13907 
13908 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13909  VmaAllocation allocation)
13910 {
13911  CallParams callParams;
13912  GetBasicParams(callParams);
13913 
13914  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13915  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13916  allocation);
13917  Flush();
13918 }
13919 
13920 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13921  VmaAllocation allocation)
13922 {
13923  CallParams callParams;
13924  GetBasicParams(callParams);
13925 
13926  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13927  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13928  allocation);
13929  Flush();
13930 }
13931 
13932 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13933  VmaAllocation allocation)
13934 {
13935  CallParams callParams;
13936  GetBasicParams(callParams);
13937 
13938  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13939  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13940  allocation);
13941  Flush();
13942 }
13943 
13944 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13945  VmaAllocation allocation)
13946 {
13947  CallParams callParams;
13948  GetBasicParams(callParams);
13949 
13950  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13951  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13952  allocation);
13953  Flush();
13954 }
13955 
13956 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13957  VmaPool pool)
13958 {
13959  CallParams callParams;
13960  GetBasicParams(callParams);
13961 
13962  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13963  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13964  pool);
13965  Flush();
13966 }
13967 
13968 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13969  const VmaDefragmentationInfo2& info,
13971 {
13972  CallParams callParams;
13973  GetBasicParams(callParams);
13974 
13975  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13976  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13977  info.flags);
13978  PrintPointerList(info.allocationCount, info.pAllocations);
13979  fprintf(m_File, ",");
13980  PrintPointerList(info.poolCount, info.pPools);
13981  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13982  info.maxCpuBytesToMove,
13984  info.maxGpuBytesToMove,
13986  info.commandBuffer,
13987  ctx);
13988  Flush();
13989 }
13990 
13991 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13993 {
13994  CallParams callParams;
13995  GetBasicParams(callParams);
13996 
13997  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13998  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13999  ctx);
14000  Flush();
14001 }
14002 
14003 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14004 {
14005  if(pUserData != VMA_NULL)
14006  {
14007  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14008  {
14009  m_Str = (const char*)pUserData;
14010  }
14011  else
14012  {
14013  sprintf_s(m_PtrStr, "%p", pUserData);
14014  m_Str = m_PtrStr;
14015  }
14016  }
14017  else
14018  {
14019  m_Str = "";
14020  }
14021 }
14022 
14023 void VmaRecorder::WriteConfiguration(
14024  const VkPhysicalDeviceProperties& devProps,
14025  const VkPhysicalDeviceMemoryProperties& memProps,
14026  bool dedicatedAllocationExtensionEnabled,
14027  bool bindMemory2ExtensionEnabled)
14028 {
14029  fprintf(m_File, "Config,Begin\n");
14030 
14031  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14032  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14033  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14034  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14035  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14036  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14037 
14038  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14039  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14040  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14041 
14042  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14043  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14044  {
14045  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14046  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14047  }
14048  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14049  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14050  {
14051  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14052  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14053  }
14054 
14055  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14056  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
14057 
14058  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14059  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14060  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14061  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14062  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14063  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14064  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14065  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14066  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14067 
14068  fprintf(m_File, "Config,End\n");
14069 }
14070 
14071 void VmaRecorder::GetBasicParams(CallParams& outParams)
14072 {
14073  outParams.threadId = GetCurrentThreadId();
14074 
14075  LARGE_INTEGER counter;
14076  QueryPerformanceCounter(&counter);
14077  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14078 }
14079 
14080 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14081 {
14082  if(count)
14083  {
14084  fprintf(m_File, "%p", pItems[0]);
14085  for(uint64_t i = 1; i < count; ++i)
14086  {
14087  fprintf(m_File, " %p", pItems[i]);
14088  }
14089  }
14090 }
14091 
14092 void VmaRecorder::Flush()
14093 {
14094  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14095  {
14096  fflush(m_File);
14097  }
14098 }
14099 
14100 #endif // #if VMA_RECORDING_ENABLED
14101 
14103 // VmaAllocationObjectAllocator
14104 
14105 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14106  m_Allocator(pAllocationCallbacks, 1024)
14107 {
14108 }
14109 
14110 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14111 {
14112  VmaMutexLock mutexLock(m_Mutex);
14113  return m_Allocator.Alloc();
14114 }
14115 
14116 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14117 {
14118  VmaMutexLock mutexLock(m_Mutex);
14119  m_Allocator.Free(hAlloc);
14120 }
14121 
14123 // VmaAllocator_T
14124 
14125 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14126  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14127  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14128  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14129  m_hDevice(pCreateInfo->device),
14130  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14131  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14132  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14133  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14134  m_PreferredLargeHeapBlockSize(0),
14135  m_PhysicalDevice(pCreateInfo->physicalDevice),
14136  m_CurrentFrameIndex(0),
14137  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14138  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14139  m_NextPoolId(0)
14141  ,m_pRecorder(VMA_NULL)
14142 #endif
14143 {
14144  if(VMA_DEBUG_DETECT_CORRUPTION)
14145  {
14146  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14147  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14148  }
14149 
14150  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14151 
14152 #if !(VMA_DEDICATED_ALLOCATION)
14154  {
14155  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14156  }
14157 #endif
14158 #if !(VMA_BIND_MEMORY2)
14159  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14160  {
14161  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14162  }
14163 #endif
14164 
14165  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14166  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14167  memset(&m_MemProps, 0, sizeof(m_MemProps));
14168 
14169  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14170  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14171 
14172  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14173  {
14174  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14175  }
14176 
14177  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14178  {
14179  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14180  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14181  }
14182 
14183  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14184 
14185  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14186  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14187 
14188  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14189  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14190  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14191  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14192 
14193  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14194  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14195 
14196  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14197  {
14198  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14199  {
14200  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14201  if(limit != VK_WHOLE_SIZE)
14202  {
14203  m_HeapSizeLimit[heapIndex] = limit;
14204  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14205  {
14206  m_MemProps.memoryHeaps[heapIndex].size = limit;
14207  }
14208  }
14209  }
14210  }
14211 
14212  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14213  {
14214  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14215 
14216  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14217  this,
14218  VK_NULL_HANDLE, // hParentPool
14219  memTypeIndex,
14220  preferredBlockSize,
14221  0,
14222  SIZE_MAX,
14223  GetBufferImageGranularity(),
14224  pCreateInfo->frameInUseCount,
14225  false, // isCustomPool
14226  false, // explicitBlockSize
14227  false); // linearAlgorithm
14228  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14229  // becase minBlockCount is 0.
14230  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14231 
14232  }
14233 }
14234 
14235 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14236 {
14237  VkResult res = VK_SUCCESS;
14238 
14239  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14240  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14241  {
14242 #if VMA_RECORDING_ENABLED
14243  m_pRecorder = vma_new(this, VmaRecorder)();
14244  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14245  if(res != VK_SUCCESS)
14246  {
14247  return res;
14248  }
14249  m_pRecorder->WriteConfiguration(
14250  m_PhysicalDeviceProperties,
14251  m_MemProps,
14252  m_UseKhrDedicatedAllocation,
14253  m_UseKhrBindMemory2);
14254  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14255 #else
14256  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14257  return VK_ERROR_FEATURE_NOT_PRESENT;
14258 #endif
14259  }
14260 
14261  return res;
14262 }
14263 
14264 VmaAllocator_T::~VmaAllocator_T()
14265 {
14266 #if VMA_RECORDING_ENABLED
14267  if(m_pRecorder != VMA_NULL)
14268  {
14269  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14270  vma_delete(this, m_pRecorder);
14271  }
14272 #endif
14273 
14274  VMA_ASSERT(m_Pools.empty());
14275 
14276  for(size_t i = GetMemoryTypeCount(); i--; )
14277  {
14278  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14279  {
14280  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14281  }
14282 
14283  vma_delete(this, m_pDedicatedAllocations[i]);
14284  vma_delete(this, m_pBlockVectors[i]);
14285  }
14286 }
14287 
14288 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14289 {
14290 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14291  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14292  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14293  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14294  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14295  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14296  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14297  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14298  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14299  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14300  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14301  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14302  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14303  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14304  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14305  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14306  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14307  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14308 #if VMA_DEDICATED_ALLOCATION
14309  if(m_UseKhrDedicatedAllocation)
14310  {
14311  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14312  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14313  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14314  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14315  }
14316 #endif // #if VMA_DEDICATED_ALLOCATION
14317 #if VMA_BIND_MEMORY2
14318  if(m_UseKhrBindMemory2)
14319  {
14320  m_VulkanFunctions.vkBindBufferMemory2KHR =
14321  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
14322  m_VulkanFunctions.vkBindImageMemory2KHR =
14323  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
14324  }
14325 #endif // #if VMA_BIND_MEMORY2
14326 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14327 
14328 #define VMA_COPY_IF_NOT_NULL(funcName) \
14329  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14330 
14331  if(pVulkanFunctions != VMA_NULL)
14332  {
14333  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14334  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14335  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14336  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14337  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14338  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14339  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14340  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14341  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14342  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14343  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14344  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14345  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14346  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14347  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14348  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14349  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14350 #if VMA_DEDICATED_ALLOCATION
14351  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14352  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14353 #endif
14354 #if VMA_BIND_MEMORY2
14355  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
14356  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
14357 #endif
14358  }
14359 
14360 #undef VMA_COPY_IF_NOT_NULL
14361 
14362  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14363  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14364  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14365  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14366  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14367  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14368  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14369  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14370  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14371  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14372  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14373  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14374  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14375  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14376  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14377  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14378  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14379  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14380  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14381 #if VMA_DEDICATED_ALLOCATION
14382  if(m_UseKhrDedicatedAllocation)
14383  {
14384  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14385  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14386  }
14387 #endif
14388 #if VMA_BIND_MEMORY2
14389  if(m_UseKhrBindMemory2)
14390  {
14391  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
14392  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
14393  }
14394 #endif
14395 }
14396 
14397 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14398 {
14399  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14400  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14401  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14402  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14403 }
14404 
14405 VkResult VmaAllocator_T::AllocateMemoryOfType(
14406  VkDeviceSize size,
14407  VkDeviceSize alignment,
14408  bool dedicatedAllocation,
14409  VkBuffer dedicatedBuffer,
14410  VkImage dedicatedImage,
14411  const VmaAllocationCreateInfo& createInfo,
14412  uint32_t memTypeIndex,
14413  VmaSuballocationType suballocType,
14414  size_t allocationCount,
14415  VmaAllocation* pAllocations)
14416 {
14417  VMA_ASSERT(pAllocations != VMA_NULL);
14418  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14419 
14420  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14421 
14422  // If memory type is not HOST_VISIBLE, disable MAPPED.
14423  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14424  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14425  {
14426  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14427  }
14428 
14429  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14430  VMA_ASSERT(blockVector);
14431 
14432  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14433  bool preferDedicatedMemory =
14434  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14435  dedicatedAllocation ||
14436  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14437  size > preferredBlockSize / 2;
14438 
14439  if(preferDedicatedMemory &&
14440  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14441  finalCreateInfo.pool == VK_NULL_HANDLE)
14442  {
14444  }
14445 
14446  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14447  {
14448  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14449  {
14450  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14451  }
14452  else
14453  {
14454  return AllocateDedicatedMemory(
14455  size,
14456  suballocType,
14457  memTypeIndex,
14458  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14459  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14460  finalCreateInfo.pUserData,
14461  dedicatedBuffer,
14462  dedicatedImage,
14463  allocationCount,
14464  pAllocations);
14465  }
14466  }
14467  else
14468  {
14469  VkResult res = blockVector->Allocate(
14470  m_CurrentFrameIndex.load(),
14471  size,
14472  alignment,
14473  finalCreateInfo,
14474  suballocType,
14475  allocationCount,
14476  pAllocations);
14477  if(res == VK_SUCCESS)
14478  {
14479  return res;
14480  }
14481 
14482  // 5. Try dedicated memory.
14483  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14484  {
14485  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14486  }
14487  else
14488  {
14489  res = AllocateDedicatedMemory(
14490  size,
14491  suballocType,
14492  memTypeIndex,
14493  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14494  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14495  finalCreateInfo.pUserData,
14496  dedicatedBuffer,
14497  dedicatedImage,
14498  allocationCount,
14499  pAllocations);
14500  if(res == VK_SUCCESS)
14501  {
14502  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14503  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14504  return VK_SUCCESS;
14505  }
14506  else
14507  {
14508  // Everything failed: Return error code.
14509  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14510  return res;
14511  }
14512  }
14513  }
14514 }
14515 
14516 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14517  VkDeviceSize size,
14518  VmaSuballocationType suballocType,
14519  uint32_t memTypeIndex,
14520  bool map,
14521  bool isUserDataString,
14522  void* pUserData,
14523  VkBuffer dedicatedBuffer,
14524  VkImage dedicatedImage,
14525  size_t allocationCount,
14526  VmaAllocation* pAllocations)
14527 {
14528  VMA_ASSERT(allocationCount > 0 && pAllocations);
14529 
14530  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14531  allocInfo.memoryTypeIndex = memTypeIndex;
14532  allocInfo.allocationSize = size;
14533 
14534 #if VMA_DEDICATED_ALLOCATION
14535  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14536  if(m_UseKhrDedicatedAllocation)
14537  {
14538  if(dedicatedBuffer != VK_NULL_HANDLE)
14539  {
14540  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14541  dedicatedAllocInfo.buffer = dedicatedBuffer;
14542  allocInfo.pNext = &dedicatedAllocInfo;
14543  }
14544  else if(dedicatedImage != VK_NULL_HANDLE)
14545  {
14546  dedicatedAllocInfo.image = dedicatedImage;
14547  allocInfo.pNext = &dedicatedAllocInfo;
14548  }
14549  }
14550 #endif // #if VMA_DEDICATED_ALLOCATION
14551 
14552  size_t allocIndex;
14553  VkResult res = VK_SUCCESS;
14554  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14555  {
14556  res = AllocateDedicatedMemoryPage(
14557  size,
14558  suballocType,
14559  memTypeIndex,
14560  allocInfo,
14561  map,
14562  isUserDataString,
14563  pUserData,
14564  pAllocations + allocIndex);
14565  if(res != VK_SUCCESS)
14566  {
14567  break;
14568  }
14569  }
14570 
14571  if(res == VK_SUCCESS)
14572  {
14573  // Register them in m_pDedicatedAllocations.
14574  {
14575  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14576  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14577  VMA_ASSERT(pDedicatedAllocations);
14578  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14579  {
14580  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14581  }
14582  }
14583 
14584  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14585  }
14586  else
14587  {
14588  // Free all already created allocations.
14589  while(allocIndex--)
14590  {
14591  VmaAllocation currAlloc = pAllocations[allocIndex];
14592  VkDeviceMemory hMemory = currAlloc->GetMemory();
14593 
14594  /*
14595  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
14596  before vkFreeMemory.
14597 
14598  if(currAlloc->GetMappedData() != VMA_NULL)
14599  {
14600  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14601  }
14602  */
14603 
14604  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14605 
14606  currAlloc->SetUserData(this, VMA_NULL);
14607  currAlloc->Dtor();
14608  m_AllocationObjectAllocator.Free(currAlloc);
14609  }
14610 
14611  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14612  }
14613 
14614  return res;
14615 }
14616 
14617 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14618  VkDeviceSize size,
14619  VmaSuballocationType suballocType,
14620  uint32_t memTypeIndex,
14621  const VkMemoryAllocateInfo& allocInfo,
14622  bool map,
14623  bool isUserDataString,
14624  void* pUserData,
14625  VmaAllocation* pAllocation)
14626 {
14627  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14628  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14629  if(res < 0)
14630  {
14631  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14632  return res;
14633  }
14634 
14635  void* pMappedData = VMA_NULL;
14636  if(map)
14637  {
14638  res = (*m_VulkanFunctions.vkMapMemory)(
14639  m_hDevice,
14640  hMemory,
14641  0,
14642  VK_WHOLE_SIZE,
14643  0,
14644  &pMappedData);
14645  if(res < 0)
14646  {
14647  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14648  FreeVulkanMemory(memTypeIndex, size, hMemory);
14649  return res;
14650  }
14651  }
14652 
14653  *pAllocation = m_AllocationObjectAllocator.Allocate();
14654  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14655  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14656  (*pAllocation)->SetUserData(this, pUserData);
14657  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14658  {
14659  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14660  }
14661 
14662  return VK_SUCCESS;
14663 }
14664 
14665 void VmaAllocator_T::GetBufferMemoryRequirements(
14666  VkBuffer hBuffer,
14667  VkMemoryRequirements& memReq,
14668  bool& requiresDedicatedAllocation,
14669  bool& prefersDedicatedAllocation) const
14670 {
14671 #if VMA_DEDICATED_ALLOCATION
14672  if(m_UseKhrDedicatedAllocation)
14673  {
14674  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14675  memReqInfo.buffer = hBuffer;
14676 
14677  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14678 
14679  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14680  memReq2.pNext = &memDedicatedReq;
14681 
14682  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14683 
14684  memReq = memReq2.memoryRequirements;
14685  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14686  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14687  }
14688  else
14689 #endif // #if VMA_DEDICATED_ALLOCATION
14690  {
14691  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14692  requiresDedicatedAllocation = false;
14693  prefersDedicatedAllocation = false;
14694  }
14695 }
14696 
14697 void VmaAllocator_T::GetImageMemoryRequirements(
14698  VkImage hImage,
14699  VkMemoryRequirements& memReq,
14700  bool& requiresDedicatedAllocation,
14701  bool& prefersDedicatedAllocation) const
14702 {
14703 #if VMA_DEDICATED_ALLOCATION
14704  if(m_UseKhrDedicatedAllocation)
14705  {
14706  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14707  memReqInfo.image = hImage;
14708 
14709  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14710 
14711  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14712  memReq2.pNext = &memDedicatedReq;
14713 
14714  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14715 
14716  memReq = memReq2.memoryRequirements;
14717  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14718  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14719  }
14720  else
14721 #endif // #if VMA_DEDICATED_ALLOCATION
14722  {
14723  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14724  requiresDedicatedAllocation = false;
14725  prefersDedicatedAllocation = false;
14726  }
14727 }
14728 
14729 VkResult VmaAllocator_T::AllocateMemory(
14730  const VkMemoryRequirements& vkMemReq,
14731  bool requiresDedicatedAllocation,
14732  bool prefersDedicatedAllocation,
14733  VkBuffer dedicatedBuffer,
14734  VkImage dedicatedImage,
14735  const VmaAllocationCreateInfo& createInfo,
14736  VmaSuballocationType suballocType,
14737  size_t allocationCount,
14738  VmaAllocation* pAllocations)
14739 {
14740  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14741 
14742  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14743 
14744  if(vkMemReq.size == 0)
14745  {
14746  return VK_ERROR_VALIDATION_FAILED_EXT;
14747  }
14748  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14749  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14750  {
14751  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14752  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14753  }
14754  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14756  {
14757  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14758  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14759  }
14760  if(requiresDedicatedAllocation)
14761  {
14762  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14763  {
14764  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14765  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14766  }
14767  if(createInfo.pool != VK_NULL_HANDLE)
14768  {
14769  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14770  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14771  }
14772  }
14773  if((createInfo.pool != VK_NULL_HANDLE) &&
14774  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14775  {
14776  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14777  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14778  }
14779 
14780  if(createInfo.pool != VK_NULL_HANDLE)
14781  {
14782  const VkDeviceSize alignmentForPool = VMA_MAX(
14783  vkMemReq.alignment,
14784  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14785 
14786  VmaAllocationCreateInfo createInfoForPool = createInfo;
14787  // If memory type is not HOST_VISIBLE, disable MAPPED.
14788  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14789  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14790  {
14791  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14792  }
14793 
14794  return createInfo.pool->m_BlockVector.Allocate(
14795  m_CurrentFrameIndex.load(),
14796  vkMemReq.size,
14797  alignmentForPool,
14798  createInfoForPool,
14799  suballocType,
14800  allocationCount,
14801  pAllocations);
14802  }
14803  else
14804  {
14805  // Bit mask of memory Vulkan types acceptable for this allocation.
14806  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14807  uint32_t memTypeIndex = UINT32_MAX;
14808  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14809  if(res == VK_SUCCESS)
14810  {
14811  VkDeviceSize alignmentForMemType = VMA_MAX(
14812  vkMemReq.alignment,
14813  GetMemoryTypeMinAlignment(memTypeIndex));
14814 
14815  res = AllocateMemoryOfType(
14816  vkMemReq.size,
14817  alignmentForMemType,
14818  requiresDedicatedAllocation || prefersDedicatedAllocation,
14819  dedicatedBuffer,
14820  dedicatedImage,
14821  createInfo,
14822  memTypeIndex,
14823  suballocType,
14824  allocationCount,
14825  pAllocations);
14826  // Succeeded on first try.
14827  if(res == VK_SUCCESS)
14828  {
14829  return res;
14830  }
14831  // Allocation from this memory type failed. Try other compatible memory types.
14832  else
14833  {
14834  for(;;)
14835  {
14836  // Remove old memTypeIndex from list of possibilities.
14837  memoryTypeBits &= ~(1u << memTypeIndex);
14838  // Find alternative memTypeIndex.
14839  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14840  if(res == VK_SUCCESS)
14841  {
14842  alignmentForMemType = VMA_MAX(
14843  vkMemReq.alignment,
14844  GetMemoryTypeMinAlignment(memTypeIndex));
14845 
14846  res = AllocateMemoryOfType(
14847  vkMemReq.size,
14848  alignmentForMemType,
14849  requiresDedicatedAllocation || prefersDedicatedAllocation,
14850  dedicatedBuffer,
14851  dedicatedImage,
14852  createInfo,
14853  memTypeIndex,
14854  suballocType,
14855  allocationCount,
14856  pAllocations);
14857  // Allocation from this alternative memory type succeeded.
14858  if(res == VK_SUCCESS)
14859  {
14860  return res;
14861  }
14862  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14863  }
14864  // No other matching memory type index could be found.
14865  else
14866  {
14867  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14868  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14869  }
14870  }
14871  }
14872  }
14873  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14874  else
14875  return res;
14876  }
14877 }
14878 
14879 void VmaAllocator_T::FreeMemory(
14880  size_t allocationCount,
14881  const VmaAllocation* pAllocations)
14882 {
14883  VMA_ASSERT(pAllocations);
14884 
14885  for(size_t allocIndex = allocationCount; allocIndex--; )
14886  {
14887  VmaAllocation allocation = pAllocations[allocIndex];
14888 
14889  if(allocation != VK_NULL_HANDLE)
14890  {
14891  if(TouchAllocation(allocation))
14892  {
14893  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14894  {
14895  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14896  }
14897 
14898  switch(allocation->GetType())
14899  {
14900  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14901  {
14902  VmaBlockVector* pBlockVector = VMA_NULL;
14903  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14904  if(hPool != VK_NULL_HANDLE)
14905  {
14906  pBlockVector = &hPool->m_BlockVector;
14907  }
14908  else
14909  {
14910  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14911  pBlockVector = m_pBlockVectors[memTypeIndex];
14912  }
14913  pBlockVector->Free(allocation);
14914  }
14915  break;
14916  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14917  FreeDedicatedMemory(allocation);
14918  break;
14919  default:
14920  VMA_ASSERT(0);
14921  }
14922  }
14923 
14924  allocation->SetUserData(this, VMA_NULL);
14925  allocation->Dtor();
14926  m_AllocationObjectAllocator.Free(allocation);
14927  }
14928  }
14929 }
14930 
14931 VkResult VmaAllocator_T::ResizeAllocation(
14932  const VmaAllocation alloc,
14933  VkDeviceSize newSize)
14934 {
14935  // This function is deprecated and so it does nothing. It's left for backward compatibility.
14936  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14937  {
14938  return VK_ERROR_VALIDATION_FAILED_EXT;
14939  }
14940  if(newSize == alloc->GetSize())
14941  {
14942  return VK_SUCCESS;
14943  }
14944  return VK_ERROR_OUT_OF_POOL_MEMORY;
14945 }
14946 
14947 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14948 {
14949  // Initialize.
14950  InitStatInfo(pStats->total);
14951  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14952  InitStatInfo(pStats->memoryType[i]);
14953  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14954  InitStatInfo(pStats->memoryHeap[i]);
14955 
14956  // Process default pools.
14957  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14958  {
14959  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14960  VMA_ASSERT(pBlockVector);
14961  pBlockVector->AddStats(pStats);
14962  }
14963 
14964  // Process custom pools.
14965  {
14966  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14967  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14968  {
14969  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14970  }
14971  }
14972 
14973  // Process dedicated allocations.
14974  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14975  {
14976  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14977  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14978  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14979  VMA_ASSERT(pDedicatedAllocVector);
14980  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14981  {
14982  VmaStatInfo allocationStatInfo;
14983  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14984  VmaAddStatInfo(pStats->total, allocationStatInfo);
14985  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14986  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14987  }
14988  }
14989 
14990  // Postprocess.
14991  VmaPostprocessCalcStatInfo(pStats->total);
14992  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14993  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14994  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14995  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14996 }
14997 
14998 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14999 
15000 VkResult VmaAllocator_T::DefragmentationBegin(
15001  const VmaDefragmentationInfo2& info,
15002  VmaDefragmentationStats* pStats,
15003  VmaDefragmentationContext* pContext)
15004 {
15005  if(info.pAllocationsChanged != VMA_NULL)
15006  {
15007  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15008  }
15009 
15010  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15011  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15012 
15013  (*pContext)->AddPools(info.poolCount, info.pPools);
15014  (*pContext)->AddAllocations(
15016 
15017  VkResult res = (*pContext)->Defragment(
15020  info.commandBuffer, pStats);
15021 
15022  if(res != VK_NOT_READY)
15023  {
15024  vma_delete(this, *pContext);
15025  *pContext = VMA_NULL;
15026  }
15027 
15028  return res;
15029 }
15030 
15031 VkResult VmaAllocator_T::DefragmentationEnd(
15032  VmaDefragmentationContext context)
15033 {
15034  vma_delete(this, context);
15035  return VK_SUCCESS;
15036 }
15037 
15038 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15039 {
15040  if(hAllocation->CanBecomeLost())
15041  {
15042  /*
15043  Warning: This is a carefully designed algorithm.
15044  Do not modify unless you really know what you're doing :)
15045  */
15046  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15047  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15048  for(;;)
15049  {
15050  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15051  {
15052  pAllocationInfo->memoryType = UINT32_MAX;
15053  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15054  pAllocationInfo->offset = 0;
15055  pAllocationInfo->size = hAllocation->GetSize();
15056  pAllocationInfo->pMappedData = VMA_NULL;
15057  pAllocationInfo->pUserData = hAllocation->GetUserData();
15058  return;
15059  }
15060  else if(localLastUseFrameIndex == localCurrFrameIndex)
15061  {
15062  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15063  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15064  pAllocationInfo->offset = hAllocation->GetOffset();
15065  pAllocationInfo->size = hAllocation->GetSize();
15066  pAllocationInfo->pMappedData = VMA_NULL;
15067  pAllocationInfo->pUserData = hAllocation->GetUserData();
15068  return;
15069  }
15070  else // Last use time earlier than current time.
15071  {
15072  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15073  {
15074  localLastUseFrameIndex = localCurrFrameIndex;
15075  }
15076  }
15077  }
15078  }
15079  else
15080  {
15081 #if VMA_STATS_STRING_ENABLED
15082  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15083  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15084  for(;;)
15085  {
15086  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15087  if(localLastUseFrameIndex == localCurrFrameIndex)
15088  {
15089  break;
15090  }
15091  else // Last use time earlier than current time.
15092  {
15093  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15094  {
15095  localLastUseFrameIndex = localCurrFrameIndex;
15096  }
15097  }
15098  }
15099 #endif
15100 
15101  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15102  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15103  pAllocationInfo->offset = hAllocation->GetOffset();
15104  pAllocationInfo->size = hAllocation->GetSize();
15105  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15106  pAllocationInfo->pUserData = hAllocation->GetUserData();
15107  }
15108 }
15109 
15110 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15111 {
15112  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15113  if(hAllocation->CanBecomeLost())
15114  {
15115  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15116  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15117  for(;;)
15118  {
15119  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15120  {
15121  return false;
15122  }
15123  else if(localLastUseFrameIndex == localCurrFrameIndex)
15124  {
15125  return true;
15126  }
15127  else // Last use time earlier than current time.
15128  {
15129  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15130  {
15131  localLastUseFrameIndex = localCurrFrameIndex;
15132  }
15133  }
15134  }
15135  }
15136  else
15137  {
15138 #if VMA_STATS_STRING_ENABLED
15139  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15140  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15141  for(;;)
15142  {
15143  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15144  if(localLastUseFrameIndex == localCurrFrameIndex)
15145  {
15146  break;
15147  }
15148  else // Last use time earlier than current time.
15149  {
15150  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15151  {
15152  localLastUseFrameIndex = localCurrFrameIndex;
15153  }
15154  }
15155  }
15156 #endif
15157 
15158  return true;
15159  }
15160 }
15161 
15162 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15163 {
15164  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15165 
15166  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15167 
15168  if(newCreateInfo.maxBlockCount == 0)
15169  {
15170  newCreateInfo.maxBlockCount = SIZE_MAX;
15171  }
15172  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15173  {
15174  return VK_ERROR_INITIALIZATION_FAILED;
15175  }
15176 
15177  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15178 
15179  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15180 
15181  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15182  if(res != VK_SUCCESS)
15183  {
15184  vma_delete(this, *pPool);
15185  *pPool = VMA_NULL;
15186  return res;
15187  }
15188 
15189  // Add to m_Pools.
15190  {
15191  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15192  (*pPool)->SetId(m_NextPoolId++);
15193  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15194  }
15195 
15196  return VK_SUCCESS;
15197 }
15198 
15199 void VmaAllocator_T::DestroyPool(VmaPool pool)
15200 {
15201  // Remove from m_Pools.
15202  {
15203  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15204  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15205  VMA_ASSERT(success && "Pool not found in Allocator.");
15206  }
15207 
15208  vma_delete(this, pool);
15209 }
15210 
15211 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15212 {
15213  pool->m_BlockVector.GetPoolStats(pPoolStats);
15214 }
15215 
15216 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15217 {
15218  m_CurrentFrameIndex.store(frameIndex);
15219 }
15220 
15221 void VmaAllocator_T::MakePoolAllocationsLost(
15222  VmaPool hPool,
15223  size_t* pLostAllocationCount)
15224 {
15225  hPool->m_BlockVector.MakePoolAllocationsLost(
15226  m_CurrentFrameIndex.load(),
15227  pLostAllocationCount);
15228 }
15229 
15230 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15231 {
15232  return hPool->m_BlockVector.CheckCorruption();
15233 }
15234 
15235 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15236 {
15237  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15238 
15239  // Process default pools.
15240  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15241  {
15242  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15243  {
15244  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15245  VMA_ASSERT(pBlockVector);
15246  VkResult localRes = pBlockVector->CheckCorruption();
15247  switch(localRes)
15248  {
15249  case VK_ERROR_FEATURE_NOT_PRESENT:
15250  break;
15251  case VK_SUCCESS:
15252  finalRes = VK_SUCCESS;
15253  break;
15254  default:
15255  return localRes;
15256  }
15257  }
15258  }
15259 
15260  // Process custom pools.
15261  {
15262  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15263  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15264  {
15265  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15266  {
15267  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15268  switch(localRes)
15269  {
15270  case VK_ERROR_FEATURE_NOT_PRESENT:
15271  break;
15272  case VK_SUCCESS:
15273  finalRes = VK_SUCCESS;
15274  break;
15275  default:
15276  return localRes;
15277  }
15278  }
15279  }
15280  }
15281 
15282  return finalRes;
15283 }
15284 
15285 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15286 {
15287  *pAllocation = m_AllocationObjectAllocator.Allocate();
15288  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15289  (*pAllocation)->InitLost();
15290 }
15291 
15292 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15293 {
15294  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15295 
15296  VkResult res;
15297  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15298  {
15299  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15300  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15301  {
15302  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15303  if(res == VK_SUCCESS)
15304  {
15305  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15306  }
15307  }
15308  else
15309  {
15310  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15311  }
15312  }
15313  else
15314  {
15315  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15316  }
15317 
15318  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15319  {
15320  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15321  }
15322 
15323  return res;
15324 }
15325 
15326 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15327 {
15328  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15329  {
15330  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15331  }
15332 
15333  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15334 
15335  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15336  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15337  {
15338  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15339  m_HeapSizeLimit[heapIndex] += size;
15340  }
15341 }
15342 
15343 VkResult VmaAllocator_T::BindVulkanBuffer(
15344  VkDeviceMemory memory,
15345  VkDeviceSize memoryOffset,
15346  VkBuffer buffer,
15347  const void* pNext)
15348 {
15349  if(pNext != VMA_NULL)
15350  {
15351 #if VMA_BIND_MEMORY2
15352  if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15353  {
15354  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
15355  bindBufferMemoryInfo.pNext = pNext;
15356  bindBufferMemoryInfo.buffer = buffer;
15357  bindBufferMemoryInfo.memory = memory;
15358  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15359  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15360  }
15361  else
15362 #endif // #if VMA_BIND_MEMORY2
15363  {
15364  return VK_ERROR_EXTENSION_NOT_PRESENT;
15365  }
15366  }
15367  else
15368  {
15369  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15370  }
15371 }
15372 
15373 VkResult VmaAllocator_T::BindVulkanImage(
15374  VkDeviceMemory memory,
15375  VkDeviceSize memoryOffset,
15376  VkImage image,
15377  const void* pNext)
15378 {
15379  if(pNext != VMA_NULL)
15380  {
15381 #if VMA_BIND_MEMORY2
15382  if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15383  {
15384  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15385  bindBufferMemoryInfo.pNext = pNext;
15386  bindBufferMemoryInfo.image = image;
15387  bindBufferMemoryInfo.memory = memory;
15388  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15389  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15390  }
15391  else
15392 #endif // #if VMA_BIND_MEMORY2
15393  {
15394  return VK_ERROR_EXTENSION_NOT_PRESENT;
15395  }
15396  }
15397  else
15398  {
15399  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15400  }
15401 }
15402 
15403 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15404 {
15405  if(hAllocation->CanBecomeLost())
15406  {
15407  return VK_ERROR_MEMORY_MAP_FAILED;
15408  }
15409 
15410  switch(hAllocation->GetType())
15411  {
15412  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15413  {
15414  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15415  char *pBytes = VMA_NULL;
15416  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15417  if(res == VK_SUCCESS)
15418  {
15419  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15420  hAllocation->BlockAllocMap();
15421  }
15422  return res;
15423  }
15424  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15425  return hAllocation->DedicatedAllocMap(this, ppData);
15426  default:
15427  VMA_ASSERT(0);
15428  return VK_ERROR_MEMORY_MAP_FAILED;
15429  }
15430 }
15431 
15432 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15433 {
15434  switch(hAllocation->GetType())
15435  {
15436  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15437  {
15438  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15439  hAllocation->BlockAllocUnmap();
15440  pBlock->Unmap(this, 1);
15441  }
15442  break;
15443  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15444  hAllocation->DedicatedAllocUnmap(this);
15445  break;
15446  default:
15447  VMA_ASSERT(0);
15448  }
15449 }
15450 
15451 VkResult VmaAllocator_T::BindBufferMemory(
15452  VmaAllocation hAllocation,
15453  VkDeviceSize allocationLocalOffset,
15454  VkBuffer hBuffer,
15455  const void* pNext)
15456 {
15457  VkResult res = VK_SUCCESS;
15458  switch(hAllocation->GetType())
15459  {
15460  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15461  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
15462  break;
15463  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15464  {
15465  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15466  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15467  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
15468  break;
15469  }
15470  default:
15471  VMA_ASSERT(0);
15472  }
15473  return res;
15474 }
15475 
15476 VkResult VmaAllocator_T::BindImageMemory(
15477  VmaAllocation hAllocation,
15478  VkDeviceSize allocationLocalOffset,
15479  VkImage hImage,
15480  const void* pNext)
15481 {
15482  VkResult res = VK_SUCCESS;
15483  switch(hAllocation->GetType())
15484  {
15485  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15486  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
15487  break;
15488  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15489  {
15490  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15491  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15492  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
15493  break;
15494  }
15495  default:
15496  VMA_ASSERT(0);
15497  }
15498  return res;
15499 }
15500 
15501 void VmaAllocator_T::FlushOrInvalidateAllocation(
15502  VmaAllocation hAllocation,
15503  VkDeviceSize offset, VkDeviceSize size,
15504  VMA_CACHE_OPERATION op)
15505 {
15506  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15507  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15508  {
15509  const VkDeviceSize allocationSize = hAllocation->GetSize();
15510  VMA_ASSERT(offset <= allocationSize);
15511 
15512  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15513 
15514  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15515  memRange.memory = hAllocation->GetMemory();
15516 
15517  switch(hAllocation->GetType())
15518  {
15519  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15520  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15521  if(size == VK_WHOLE_SIZE)
15522  {
15523  memRange.size = allocationSize - memRange.offset;
15524  }
15525  else
15526  {
15527  VMA_ASSERT(offset + size <= allocationSize);
15528  memRange.size = VMA_MIN(
15529  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15530  allocationSize - memRange.offset);
15531  }
15532  break;
15533 
15534  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15535  {
15536  // 1. Still within this allocation.
15537  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15538  if(size == VK_WHOLE_SIZE)
15539  {
15540  size = allocationSize - offset;
15541  }
15542  else
15543  {
15544  VMA_ASSERT(offset + size <= allocationSize);
15545  }
15546  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15547 
15548  // 2. Adjust to whole block.
15549  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15550  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15551  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15552  memRange.offset += allocationOffset;
15553  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15554 
15555  break;
15556  }
15557 
15558  default:
15559  VMA_ASSERT(0);
15560  }
15561 
15562  switch(op)
15563  {
15564  case VMA_CACHE_FLUSH:
15565  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15566  break;
15567  case VMA_CACHE_INVALIDATE:
15568  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15569  break;
15570  default:
15571  VMA_ASSERT(0);
15572  }
15573  }
15574  // else: Just ignore this call.
15575 }
15576 
15577 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15578 {
15579  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15580 
15581  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15582  {
15583  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15584  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15585  VMA_ASSERT(pDedicatedAllocations);
15586  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15587  VMA_ASSERT(success);
15588  }
15589 
15590  VkDeviceMemory hMemory = allocation->GetMemory();
15591 
15592  /*
15593  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
15594  before vkFreeMemory.
15595 
15596  if(allocation->GetMappedData() != VMA_NULL)
15597  {
15598  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15599  }
15600  */
15601 
15602  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15603 
15604  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15605 }
15606 
15607 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15608 {
15609  VkBufferCreateInfo dummyBufCreateInfo;
15610  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15611 
15612  uint32_t memoryTypeBits = 0;
15613 
15614  // Create buffer.
15615  VkBuffer buf = VK_NULL_HANDLE;
15616  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15617  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15618  if(res == VK_SUCCESS)
15619  {
15620  // Query for supported memory types.
15621  VkMemoryRequirements memReq;
15622  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15623  memoryTypeBits = memReq.memoryTypeBits;
15624 
15625  // Destroy buffer.
15626  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15627  }
15628 
15629  return memoryTypeBits;
15630 }
15631 
15632 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15633 {
15634  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15635  !hAllocation->CanBecomeLost() &&
15636  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15637  {
15638  void* pData = VMA_NULL;
15639  VkResult res = Map(hAllocation, &pData);
15640  if(res == VK_SUCCESS)
15641  {
15642  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15643  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15644  Unmap(hAllocation);
15645  }
15646  else
15647  {
15648  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15649  }
15650  }
15651 }
15652 
15653 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
15654 {
15655  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
15656  if(memoryTypeBits == UINT32_MAX)
15657  {
15658  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
15659  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
15660  }
15661  return memoryTypeBits;
15662 }
15663 
15664 #if VMA_STATS_STRING_ENABLED
15665 
15666 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15667 {
15668  bool dedicatedAllocationsStarted = false;
15669  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15670  {
15671  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15672  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15673  VMA_ASSERT(pDedicatedAllocVector);
15674  if(pDedicatedAllocVector->empty() == false)
15675  {
15676  if(dedicatedAllocationsStarted == false)
15677  {
15678  dedicatedAllocationsStarted = true;
15679  json.WriteString("DedicatedAllocations");
15680  json.BeginObject();
15681  }
15682 
15683  json.BeginString("Type ");
15684  json.ContinueString(memTypeIndex);
15685  json.EndString();
15686 
15687  json.BeginArray();
15688 
15689  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15690  {
15691  json.BeginObject(true);
15692  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15693  hAlloc->PrintParameters(json);
15694  json.EndObject();
15695  }
15696 
15697  json.EndArray();
15698  }
15699  }
15700  if(dedicatedAllocationsStarted)
15701  {
15702  json.EndObject();
15703  }
15704 
15705  {
15706  bool allocationsStarted = false;
15707  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15708  {
15709  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15710  {
15711  if(allocationsStarted == false)
15712  {
15713  allocationsStarted = true;
15714  json.WriteString("DefaultPools");
15715  json.BeginObject();
15716  }
15717 
15718  json.BeginString("Type ");
15719  json.ContinueString(memTypeIndex);
15720  json.EndString();
15721 
15722  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15723  }
15724  }
15725  if(allocationsStarted)
15726  {
15727  json.EndObject();
15728  }
15729  }
15730 
15731  // Custom pools
15732  {
15733  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15734  const size_t poolCount = m_Pools.size();
15735  if(poolCount > 0)
15736  {
15737  json.WriteString("Pools");
15738  json.BeginObject();
15739  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15740  {
15741  json.BeginString();
15742  json.ContinueString(m_Pools[poolIndex]->GetId());
15743  json.EndString();
15744 
15745  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15746  }
15747  json.EndObject();
15748  }
15749  }
15750 }
15751 
15752 #endif // #if VMA_STATS_STRING_ENABLED
15753 
15755 // Public interface
15756 
15757 VkResult vmaCreateAllocator(
15758  const VmaAllocatorCreateInfo* pCreateInfo,
15759  VmaAllocator* pAllocator)
15760 {
15761  VMA_ASSERT(pCreateInfo && pAllocator);
15762  VMA_DEBUG_LOG("vmaCreateAllocator");
15763  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15764  return (*pAllocator)->Init(pCreateInfo);
15765 }
15766 
15767 void vmaDestroyAllocator(
15768  VmaAllocator allocator)
15769 {
15770  if(allocator != VK_NULL_HANDLE)
15771  {
15772  VMA_DEBUG_LOG("vmaDestroyAllocator");
15773  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15774  vma_delete(&allocationCallbacks, allocator);
15775  }
15776 }
15777 
15779  VmaAllocator allocator,
15780  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15781 {
15782  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15783  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15784 }
15785 
15787  VmaAllocator allocator,
15788  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15789 {
15790  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15791  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15792 }
15793 
15795  VmaAllocator allocator,
15796  uint32_t memoryTypeIndex,
15797  VkMemoryPropertyFlags* pFlags)
15798 {
15799  VMA_ASSERT(allocator && pFlags);
15800  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15801  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15802 }
15803 
15805  VmaAllocator allocator,
15806  uint32_t frameIndex)
15807 {
15808  VMA_ASSERT(allocator);
15809  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15810 
15811  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15812 
15813  allocator->SetCurrentFrameIndex(frameIndex);
15814 }
15815 
15816 void vmaCalculateStats(
15817  VmaAllocator allocator,
15818  VmaStats* pStats)
15819 {
15820  VMA_ASSERT(allocator && pStats);
15821  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15822  allocator->CalculateStats(pStats);
15823 }
15824 
15825 #if VMA_STATS_STRING_ENABLED
15826 
15827 void vmaBuildStatsString(
15828  VmaAllocator allocator,
15829  char** ppStatsString,
15830  VkBool32 detailedMap)
15831 {
15832  VMA_ASSERT(allocator && ppStatsString);
15833  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15834 
15835  VmaStringBuilder sb(allocator);
15836  {
15837  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15838  json.BeginObject();
15839 
15840  VmaStats stats;
15841  allocator->CalculateStats(&stats);
15842 
15843  json.WriteString("Total");
15844  VmaPrintStatInfo(json, stats.total);
15845 
15846  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15847  {
15848  json.BeginString("Heap ");
15849  json.ContinueString(heapIndex);
15850  json.EndString();
15851  json.BeginObject();
15852 
15853  json.WriteString("Size");
15854  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15855 
15856  json.WriteString("Flags");
15857  json.BeginArray(true);
15858  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15859  {
15860  json.WriteString("DEVICE_LOCAL");
15861  }
15862  json.EndArray();
15863 
15864  if(stats.memoryHeap[heapIndex].blockCount > 0)
15865  {
15866  json.WriteString("Stats");
15867  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15868  }
15869 
15870  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15871  {
15872  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15873  {
15874  json.BeginString("Type ");
15875  json.ContinueString(typeIndex);
15876  json.EndString();
15877 
15878  json.BeginObject();
15879 
15880  json.WriteString("Flags");
15881  json.BeginArray(true);
15882  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15883  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15884  {
15885  json.WriteString("DEVICE_LOCAL");
15886  }
15887  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15888  {
15889  json.WriteString("HOST_VISIBLE");
15890  }
15891  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15892  {
15893  json.WriteString("HOST_COHERENT");
15894  }
15895  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15896  {
15897  json.WriteString("HOST_CACHED");
15898  }
15899  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15900  {
15901  json.WriteString("LAZILY_ALLOCATED");
15902  }
15903  json.EndArray();
15904 
15905  if(stats.memoryType[typeIndex].blockCount > 0)
15906  {
15907  json.WriteString("Stats");
15908  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15909  }
15910 
15911  json.EndObject();
15912  }
15913  }
15914 
15915  json.EndObject();
15916  }
15917  if(detailedMap == VK_TRUE)
15918  {
15919  allocator->PrintDetailedMap(json);
15920  }
15921 
15922  json.EndObject();
15923  }
15924 
15925  const size_t len = sb.GetLength();
15926  char* const pChars = vma_new_array(allocator, char, len + 1);
15927  if(len > 0)
15928  {
15929  memcpy(pChars, sb.GetData(), len);
15930  }
15931  pChars[len] = '\0';
15932  *ppStatsString = pChars;
15933 }
15934 
15935 void vmaFreeStatsString(
15936  VmaAllocator allocator,
15937  char* pStatsString)
15938 {
15939  if(pStatsString != VMA_NULL)
15940  {
15941  VMA_ASSERT(allocator);
15942  size_t len = strlen(pStatsString);
15943  vma_delete_array(allocator, pStatsString, len + 1);
15944  }
15945 }
15946 
15947 #endif // #if VMA_STATS_STRING_ENABLED
15948 
15949 /*
15950 This function is not protected by any mutex because it just reads immutable data.
15951 */
15952 VkResult vmaFindMemoryTypeIndex(
15953  VmaAllocator allocator,
15954  uint32_t memoryTypeBits,
15955  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15956  uint32_t* pMemoryTypeIndex)
15957 {
15958  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15959  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15960  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15961 
15962  if(pAllocationCreateInfo->memoryTypeBits != 0)
15963  {
15964  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15965  }
15966 
15967  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15968  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15969 
15970  // Convert usage to requiredFlags and preferredFlags.
15971  switch(pAllocationCreateInfo->usage)
15972  {
15974  break;
15976  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15977  {
15978  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15979  }
15980  break;
15982  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15983  break;
15985  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15986  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15987  {
15988  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15989  }
15990  break;
15992  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15993  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15994  break;
15995  default:
15996  break;
15997  }
15998 
15999  *pMemoryTypeIndex = UINT32_MAX;
16000  uint32_t minCost = UINT32_MAX;
16001  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16002  memTypeIndex < allocator->GetMemoryTypeCount();
16003  ++memTypeIndex, memTypeBit <<= 1)
16004  {
16005  // This memory type is acceptable according to memoryTypeBits bitmask.
16006  if((memTypeBit & memoryTypeBits) != 0)
16007  {
16008  const VkMemoryPropertyFlags currFlags =
16009  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16010  // This memory type contains requiredFlags.
16011  if((requiredFlags & ~currFlags) == 0)
16012  {
16013  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16014  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
16015  // Remember memory type with lowest cost.
16016  if(currCost < minCost)
16017  {
16018  *pMemoryTypeIndex = memTypeIndex;
16019  if(currCost == 0)
16020  {
16021  return VK_SUCCESS;
16022  }
16023  minCost = currCost;
16024  }
16025  }
16026  }
16027  }
16028  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16029 }
16030 
16032  VmaAllocator allocator,
16033  const VkBufferCreateInfo* pBufferCreateInfo,
16034  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16035  uint32_t* pMemoryTypeIndex)
16036 {
16037  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16038  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16039  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16040  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16041 
16042  const VkDevice hDev = allocator->m_hDevice;
16043  VkBuffer hBuffer = VK_NULL_HANDLE;
16044  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16045  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16046  if(res == VK_SUCCESS)
16047  {
16048  VkMemoryRequirements memReq = {};
16049  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16050  hDev, hBuffer, &memReq);
16051 
16052  res = vmaFindMemoryTypeIndex(
16053  allocator,
16054  memReq.memoryTypeBits,
16055  pAllocationCreateInfo,
16056  pMemoryTypeIndex);
16057 
16058  allocator->GetVulkanFunctions().vkDestroyBuffer(
16059  hDev, hBuffer, allocator->GetAllocationCallbacks());
16060  }
16061  return res;
16062 }
16063 
16065  VmaAllocator allocator,
16066  const VkImageCreateInfo* pImageCreateInfo,
16067  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16068  uint32_t* pMemoryTypeIndex)
16069 {
16070  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16071  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16072  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16073  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16074 
16075  const VkDevice hDev = allocator->m_hDevice;
16076  VkImage hImage = VK_NULL_HANDLE;
16077  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16078  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16079  if(res == VK_SUCCESS)
16080  {
16081  VkMemoryRequirements memReq = {};
16082  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16083  hDev, hImage, &memReq);
16084 
16085  res = vmaFindMemoryTypeIndex(
16086  allocator,
16087  memReq.memoryTypeBits,
16088  pAllocationCreateInfo,
16089  pMemoryTypeIndex);
16090 
16091  allocator->GetVulkanFunctions().vkDestroyImage(
16092  hDev, hImage, allocator->GetAllocationCallbacks());
16093  }
16094  return res;
16095 }
16096 
16097 VkResult vmaCreatePool(
16098  VmaAllocator allocator,
16099  const VmaPoolCreateInfo* pCreateInfo,
16100  VmaPool* pPool)
16101 {
16102  VMA_ASSERT(allocator && pCreateInfo && pPool);
16103 
16104  VMA_DEBUG_LOG("vmaCreatePool");
16105 
16106  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16107 
16108  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16109 
16110 #if VMA_RECORDING_ENABLED
16111  if(allocator->GetRecorder() != VMA_NULL)
16112  {
16113  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16114  }
16115 #endif
16116 
16117  return res;
16118 }
16119 
16120 void vmaDestroyPool(
16121  VmaAllocator allocator,
16122  VmaPool pool)
16123 {
16124  VMA_ASSERT(allocator);
16125 
16126  if(pool == VK_NULL_HANDLE)
16127  {
16128  return;
16129  }
16130 
16131  VMA_DEBUG_LOG("vmaDestroyPool");
16132 
16133  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16134 
16135 #if VMA_RECORDING_ENABLED
16136  if(allocator->GetRecorder() != VMA_NULL)
16137  {
16138  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16139  }
16140 #endif
16141 
16142  allocator->DestroyPool(pool);
16143 }
16144 
16145 void vmaGetPoolStats(
16146  VmaAllocator allocator,
16147  VmaPool pool,
16148  VmaPoolStats* pPoolStats)
16149 {
16150  VMA_ASSERT(allocator && pool && pPoolStats);
16151 
16152  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16153 
16154  allocator->GetPoolStats(pool, pPoolStats);
16155 }
16156 
16158  VmaAllocator allocator,
16159  VmaPool pool,
16160  size_t* pLostAllocationCount)
16161 {
16162  VMA_ASSERT(allocator && pool);
16163 
16164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16165 
16166 #if VMA_RECORDING_ENABLED
16167  if(allocator->GetRecorder() != VMA_NULL)
16168  {
16169  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16170  }
16171 #endif
16172 
16173  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16174 }
16175 
16176 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16177 {
16178  VMA_ASSERT(allocator && pool);
16179 
16180  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16181 
16182  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16183 
16184  return allocator->CheckPoolCorruption(pool);
16185 }
16186 
16187 VkResult vmaAllocateMemory(
16188  VmaAllocator allocator,
16189  const VkMemoryRequirements* pVkMemoryRequirements,
16190  const VmaAllocationCreateInfo* pCreateInfo,
16191  VmaAllocation* pAllocation,
16192  VmaAllocationInfo* pAllocationInfo)
16193 {
16194  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16195 
16196  VMA_DEBUG_LOG("vmaAllocateMemory");
16197 
16198  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16199 
16200  VkResult result = allocator->AllocateMemory(
16201  *pVkMemoryRequirements,
16202  false, // requiresDedicatedAllocation
16203  false, // prefersDedicatedAllocation
16204  VK_NULL_HANDLE, // dedicatedBuffer
16205  VK_NULL_HANDLE, // dedicatedImage
16206  *pCreateInfo,
16207  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16208  1, // allocationCount
16209  pAllocation);
16210 
16211 #if VMA_RECORDING_ENABLED
16212  if(allocator->GetRecorder() != VMA_NULL)
16213  {
16214  allocator->GetRecorder()->RecordAllocateMemory(
16215  allocator->GetCurrentFrameIndex(),
16216  *pVkMemoryRequirements,
16217  *pCreateInfo,
16218  *pAllocation);
16219  }
16220 #endif
16221 
16222  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16223  {
16224  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16225  }
16226 
16227  return result;
16228 }
16229 
16230 VkResult vmaAllocateMemoryPages(
16231  VmaAllocator allocator,
16232  const VkMemoryRequirements* pVkMemoryRequirements,
16233  const VmaAllocationCreateInfo* pCreateInfo,
16234  size_t allocationCount,
16235  VmaAllocation* pAllocations,
16236  VmaAllocationInfo* pAllocationInfo)
16237 {
16238  if(allocationCount == 0)
16239  {
16240  return VK_SUCCESS;
16241  }
16242 
16243  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16244 
16245  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16246 
16247  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16248 
16249  VkResult result = allocator->AllocateMemory(
16250  *pVkMemoryRequirements,
16251  false, // requiresDedicatedAllocation
16252  false, // prefersDedicatedAllocation
16253  VK_NULL_HANDLE, // dedicatedBuffer
16254  VK_NULL_HANDLE, // dedicatedImage
16255  *pCreateInfo,
16256  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16257  allocationCount,
16258  pAllocations);
16259 
16260 #if VMA_RECORDING_ENABLED
16261  if(allocator->GetRecorder() != VMA_NULL)
16262  {
16263  allocator->GetRecorder()->RecordAllocateMemoryPages(
16264  allocator->GetCurrentFrameIndex(),
16265  *pVkMemoryRequirements,
16266  *pCreateInfo,
16267  (uint64_t)allocationCount,
16268  pAllocations);
16269  }
16270 #endif
16271 
16272  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16273  {
16274  for(size_t i = 0; i < allocationCount; ++i)
16275  {
16276  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16277  }
16278  }
16279 
16280  return result;
16281 }
16282 
16284  VmaAllocator allocator,
16285  VkBuffer buffer,
16286  const VmaAllocationCreateInfo* pCreateInfo,
16287  VmaAllocation* pAllocation,
16288  VmaAllocationInfo* pAllocationInfo)
16289 {
16290  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16291 
16292  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16293 
16294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16295 
16296  VkMemoryRequirements vkMemReq = {};
16297  bool requiresDedicatedAllocation = false;
16298  bool prefersDedicatedAllocation = false;
16299  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16300  requiresDedicatedAllocation,
16301  prefersDedicatedAllocation);
16302 
16303  VkResult result = allocator->AllocateMemory(
16304  vkMemReq,
16305  requiresDedicatedAllocation,
16306  prefersDedicatedAllocation,
16307  buffer, // dedicatedBuffer
16308  VK_NULL_HANDLE, // dedicatedImage
16309  *pCreateInfo,
16310  VMA_SUBALLOCATION_TYPE_BUFFER,
16311  1, // allocationCount
16312  pAllocation);
16313 
16314 #if VMA_RECORDING_ENABLED
16315  if(allocator->GetRecorder() != VMA_NULL)
16316  {
16317  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16318  allocator->GetCurrentFrameIndex(),
16319  vkMemReq,
16320  requiresDedicatedAllocation,
16321  prefersDedicatedAllocation,
16322  *pCreateInfo,
16323  *pAllocation);
16324  }
16325 #endif
16326 
16327  if(pAllocationInfo && result == VK_SUCCESS)
16328  {
16329  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16330  }
16331 
16332  return result;
16333 }
16334 
16335 VkResult vmaAllocateMemoryForImage(
16336  VmaAllocator allocator,
16337  VkImage image,
16338  const VmaAllocationCreateInfo* pCreateInfo,
16339  VmaAllocation* pAllocation,
16340  VmaAllocationInfo* pAllocationInfo)
16341 {
16342  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16343 
16344  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16345 
16346  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16347 
16348  VkMemoryRequirements vkMemReq = {};
16349  bool requiresDedicatedAllocation = false;
16350  bool prefersDedicatedAllocation = false;
16351  allocator->GetImageMemoryRequirements(image, vkMemReq,
16352  requiresDedicatedAllocation, prefersDedicatedAllocation);
16353 
16354  VkResult result = allocator->AllocateMemory(
16355  vkMemReq,
16356  requiresDedicatedAllocation,
16357  prefersDedicatedAllocation,
16358  VK_NULL_HANDLE, // dedicatedBuffer
16359  image, // dedicatedImage
16360  *pCreateInfo,
16361  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16362  1, // allocationCount
16363  pAllocation);
16364 
16365 #if VMA_RECORDING_ENABLED
16366  if(allocator->GetRecorder() != VMA_NULL)
16367  {
16368  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16369  allocator->GetCurrentFrameIndex(),
16370  vkMemReq,
16371  requiresDedicatedAllocation,
16372  prefersDedicatedAllocation,
16373  *pCreateInfo,
16374  *pAllocation);
16375  }
16376 #endif
16377 
16378  if(pAllocationInfo && result == VK_SUCCESS)
16379  {
16380  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16381  }
16382 
16383  return result;
16384 }
16385 
16386 void vmaFreeMemory(
16387  VmaAllocator allocator,
16388  VmaAllocation allocation)
16389 {
16390  VMA_ASSERT(allocator);
16391 
16392  if(allocation == VK_NULL_HANDLE)
16393  {
16394  return;
16395  }
16396 
16397  VMA_DEBUG_LOG("vmaFreeMemory");
16398 
16399  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16400 
16401 #if VMA_RECORDING_ENABLED
16402  if(allocator->GetRecorder() != VMA_NULL)
16403  {
16404  allocator->GetRecorder()->RecordFreeMemory(
16405  allocator->GetCurrentFrameIndex(),
16406  allocation);
16407  }
16408 #endif
16409 
16410  allocator->FreeMemory(
16411  1, // allocationCount
16412  &allocation);
16413 }
16414 
16415 void vmaFreeMemoryPages(
16416  VmaAllocator allocator,
16417  size_t allocationCount,
16418  VmaAllocation* pAllocations)
16419 {
16420  if(allocationCount == 0)
16421  {
16422  return;
16423  }
16424 
16425  VMA_ASSERT(allocator);
16426 
16427  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16428 
16429  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16430 
16431 #if VMA_RECORDING_ENABLED
16432  if(allocator->GetRecorder() != VMA_NULL)
16433  {
16434  allocator->GetRecorder()->RecordFreeMemoryPages(
16435  allocator->GetCurrentFrameIndex(),
16436  (uint64_t)allocationCount,
16437  pAllocations);
16438  }
16439 #endif
16440 
16441  allocator->FreeMemory(allocationCount, pAllocations);
16442 }
16443 
16444 VkResult vmaResizeAllocation(
16445  VmaAllocator allocator,
16446  VmaAllocation allocation,
16447  VkDeviceSize newSize)
16448 {
16449  VMA_ASSERT(allocator && allocation);
16450 
16451  VMA_DEBUG_LOG("vmaResizeAllocation");
16452 
16453  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16454 
16455  return allocator->ResizeAllocation(allocation, newSize);
16456 }
16457 
16459  VmaAllocator allocator,
16460  VmaAllocation allocation,
16461  VmaAllocationInfo* pAllocationInfo)
16462 {
16463  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16464 
16465  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16466 
16467 #if VMA_RECORDING_ENABLED
16468  if(allocator->GetRecorder() != VMA_NULL)
16469  {
16470  allocator->GetRecorder()->RecordGetAllocationInfo(
16471  allocator->GetCurrentFrameIndex(),
16472  allocation);
16473  }
16474 #endif
16475 
16476  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16477 }
16478 
16479 VkBool32 vmaTouchAllocation(
16480  VmaAllocator allocator,
16481  VmaAllocation allocation)
16482 {
16483  VMA_ASSERT(allocator && allocation);
16484 
16485  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16486 
16487 #if VMA_RECORDING_ENABLED
16488  if(allocator->GetRecorder() != VMA_NULL)
16489  {
16490  allocator->GetRecorder()->RecordTouchAllocation(
16491  allocator->GetCurrentFrameIndex(),
16492  allocation);
16493  }
16494 #endif
16495 
16496  return allocator->TouchAllocation(allocation);
16497 }
16498 
16500  VmaAllocator allocator,
16501  VmaAllocation allocation,
16502  void* pUserData)
16503 {
16504  VMA_ASSERT(allocator && allocation);
16505 
16506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16507 
16508  allocation->SetUserData(allocator, pUserData);
16509 
16510 #if VMA_RECORDING_ENABLED
16511  if(allocator->GetRecorder() != VMA_NULL)
16512  {
16513  allocator->GetRecorder()->RecordSetAllocationUserData(
16514  allocator->GetCurrentFrameIndex(),
16515  allocation,
16516  pUserData);
16517  }
16518 #endif
16519 }
16520 
16522  VmaAllocator allocator,
16523  VmaAllocation* pAllocation)
16524 {
16525  VMA_ASSERT(allocator && pAllocation);
16526 
16527  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16528 
16529  allocator->CreateLostAllocation(pAllocation);
16530 
16531 #if VMA_RECORDING_ENABLED
16532  if(allocator->GetRecorder() != VMA_NULL)
16533  {
16534  allocator->GetRecorder()->RecordCreateLostAllocation(
16535  allocator->GetCurrentFrameIndex(),
16536  *pAllocation);
16537  }
16538 #endif
16539 }
16540 
16541 VkResult vmaMapMemory(
16542  VmaAllocator allocator,
16543  VmaAllocation allocation,
16544  void** ppData)
16545 {
16546  VMA_ASSERT(allocator && allocation && ppData);
16547 
16548  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16549 
16550  VkResult res = allocator->Map(allocation, ppData);
16551 
16552 #if VMA_RECORDING_ENABLED
16553  if(allocator->GetRecorder() != VMA_NULL)
16554  {
16555  allocator->GetRecorder()->RecordMapMemory(
16556  allocator->GetCurrentFrameIndex(),
16557  allocation);
16558  }
16559 #endif
16560 
16561  return res;
16562 }
16563 
16564 void vmaUnmapMemory(
16565  VmaAllocator allocator,
16566  VmaAllocation allocation)
16567 {
16568  VMA_ASSERT(allocator && allocation);
16569 
16570  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16571 
16572 #if VMA_RECORDING_ENABLED
16573  if(allocator->GetRecorder() != VMA_NULL)
16574  {
16575  allocator->GetRecorder()->RecordUnmapMemory(
16576  allocator->GetCurrentFrameIndex(),
16577  allocation);
16578  }
16579 #endif
16580 
16581  allocator->Unmap(allocation);
16582 }
16583 
16584 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16585 {
16586  VMA_ASSERT(allocator && allocation);
16587 
16588  VMA_DEBUG_LOG("vmaFlushAllocation");
16589 
16590  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16591 
16592  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16593 
16594 #if VMA_RECORDING_ENABLED
16595  if(allocator->GetRecorder() != VMA_NULL)
16596  {
16597  allocator->GetRecorder()->RecordFlushAllocation(
16598  allocator->GetCurrentFrameIndex(),
16599  allocation, offset, size);
16600  }
16601 #endif
16602 }
16603 
16604 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16605 {
16606  VMA_ASSERT(allocator && allocation);
16607 
16608  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16609 
16610  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16611 
16612  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16613 
16614 #if VMA_RECORDING_ENABLED
16615  if(allocator->GetRecorder() != VMA_NULL)
16616  {
16617  allocator->GetRecorder()->RecordInvalidateAllocation(
16618  allocator->GetCurrentFrameIndex(),
16619  allocation, offset, size);
16620  }
16621 #endif
16622 }
16623 
16624 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16625 {
16626  VMA_ASSERT(allocator);
16627 
16628  VMA_DEBUG_LOG("vmaCheckCorruption");
16629 
16630  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16631 
16632  return allocator->CheckCorruption(memoryTypeBits);
16633 }
16634 
16635 VkResult vmaDefragment(
16636  VmaAllocator allocator,
16637  VmaAllocation* pAllocations,
16638  size_t allocationCount,
16639  VkBool32* pAllocationsChanged,
16640  const VmaDefragmentationInfo *pDefragmentationInfo,
16641  VmaDefragmentationStats* pDefragmentationStats)
16642 {
16643  // Deprecated interface, reimplemented using new one.
16644 
16645  VmaDefragmentationInfo2 info2 = {};
16646  info2.allocationCount = (uint32_t)allocationCount;
16647  info2.pAllocations = pAllocations;
16648  info2.pAllocationsChanged = pAllocationsChanged;
16649  if(pDefragmentationInfo != VMA_NULL)
16650  {
16651  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16652  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16653  }
16654  else
16655  {
16656  info2.maxCpuAllocationsToMove = UINT32_MAX;
16657  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16658  }
16659  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16660 
16662  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16663  if(res == VK_NOT_READY)
16664  {
16665  res = vmaDefragmentationEnd( allocator, ctx);
16666  }
16667  return res;
16668 }
16669 
16670 VkResult vmaDefragmentationBegin(
16671  VmaAllocator allocator,
16672  const VmaDefragmentationInfo2* pInfo,
16673  VmaDefragmentationStats* pStats,
16674  VmaDefragmentationContext *pContext)
16675 {
16676  VMA_ASSERT(allocator && pInfo && pContext);
16677 
16678  // Degenerate case: Nothing to defragment.
16679  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16680  {
16681  return VK_SUCCESS;
16682  }
16683 
16684  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16685  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16686  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16687  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16688 
16689  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16690 
16691  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16692 
16693  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16694 
16695 #if VMA_RECORDING_ENABLED
16696  if(allocator->GetRecorder() != VMA_NULL)
16697  {
16698  allocator->GetRecorder()->RecordDefragmentationBegin(
16699  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16700  }
16701 #endif
16702 
16703  return res;
16704 }
16705 
16706 VkResult vmaDefragmentationEnd(
16707  VmaAllocator allocator,
16708  VmaDefragmentationContext context)
16709 {
16710  VMA_ASSERT(allocator);
16711 
16712  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16713 
16714  if(context != VK_NULL_HANDLE)
16715  {
16716  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16717 
16718 #if VMA_RECORDING_ENABLED
16719  if(allocator->GetRecorder() != VMA_NULL)
16720  {
16721  allocator->GetRecorder()->RecordDefragmentationEnd(
16722  allocator->GetCurrentFrameIndex(), context);
16723  }
16724 #endif
16725 
16726  return allocator->DefragmentationEnd(context);
16727  }
16728  else
16729  {
16730  return VK_SUCCESS;
16731  }
16732 }
16733 
16734 VkResult vmaBindBufferMemory(
16735  VmaAllocator allocator,
16736  VmaAllocation allocation,
16737  VkBuffer buffer)
16738 {
16739  VMA_ASSERT(allocator && allocation && buffer);
16740 
16741  VMA_DEBUG_LOG("vmaBindBufferMemory");
16742 
16743  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16744 
16745  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
16746 }
16747 
16748 VkResult vmaBindBufferMemory2(
16749  VmaAllocator allocator,
16750  VmaAllocation allocation,
16751  VkDeviceSize allocationLocalOffset,
16752  VkBuffer buffer,
16753  const void* pNext)
16754 {
16755  VMA_ASSERT(allocator && allocation && buffer);
16756 
16757  VMA_DEBUG_LOG("vmaBindBufferMemory2");
16758 
16759  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16760 
16761  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
16762 }
16763 
16764 VkResult vmaBindImageMemory(
16765  VmaAllocator allocator,
16766  VmaAllocation allocation,
16767  VkImage image)
16768 {
16769  VMA_ASSERT(allocator && allocation && image);
16770 
16771  VMA_DEBUG_LOG("vmaBindImageMemory");
16772 
16773  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16774 
16775  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
16776 }
16777 
16778 VkResult vmaBindImageMemory2(
16779  VmaAllocator allocator,
16780  VmaAllocation allocation,
16781  VkDeviceSize allocationLocalOffset,
16782  VkImage image,
16783  const void* pNext)
16784 {
16785  VMA_ASSERT(allocator && allocation && image);
16786 
16787  VMA_DEBUG_LOG("vmaBindImageMemory2");
16788 
16789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16790 
16791  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
16792 }
16793 
16794 VkResult vmaCreateBuffer(
16795  VmaAllocator allocator,
16796  const VkBufferCreateInfo* pBufferCreateInfo,
16797  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16798  VkBuffer* pBuffer,
16799  VmaAllocation* pAllocation,
16800  VmaAllocationInfo* pAllocationInfo)
16801 {
16802  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16803 
16804  if(pBufferCreateInfo->size == 0)
16805  {
16806  return VK_ERROR_VALIDATION_FAILED_EXT;
16807  }
16808 
16809  VMA_DEBUG_LOG("vmaCreateBuffer");
16810 
16811  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16812 
16813  *pBuffer = VK_NULL_HANDLE;
16814  *pAllocation = VK_NULL_HANDLE;
16815 
16816  // 1. Create VkBuffer.
16817  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16818  allocator->m_hDevice,
16819  pBufferCreateInfo,
16820  allocator->GetAllocationCallbacks(),
16821  pBuffer);
16822  if(res >= 0)
16823  {
16824  // 2. vkGetBufferMemoryRequirements.
16825  VkMemoryRequirements vkMemReq = {};
16826  bool requiresDedicatedAllocation = false;
16827  bool prefersDedicatedAllocation = false;
16828  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16829  requiresDedicatedAllocation, prefersDedicatedAllocation);
16830 
16831  // Make sure alignment requirements for specific buffer usages reported
16832  // in Physical Device Properties are included in alignment reported by memory requirements.
16833  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16834  {
16835  VMA_ASSERT(vkMemReq.alignment %
16836  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16837  }
16838  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16839  {
16840  VMA_ASSERT(vkMemReq.alignment %
16841  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16842  }
16843  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16844  {
16845  VMA_ASSERT(vkMemReq.alignment %
16846  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16847  }
16848 
16849  // 3. Allocate memory using allocator.
16850  res = allocator->AllocateMemory(
16851  vkMemReq,
16852  requiresDedicatedAllocation,
16853  prefersDedicatedAllocation,
16854  *pBuffer, // dedicatedBuffer
16855  VK_NULL_HANDLE, // dedicatedImage
16856  *pAllocationCreateInfo,
16857  VMA_SUBALLOCATION_TYPE_BUFFER,
16858  1, // allocationCount
16859  pAllocation);
16860 
16861 #if VMA_RECORDING_ENABLED
16862  if(allocator->GetRecorder() != VMA_NULL)
16863  {
16864  allocator->GetRecorder()->RecordCreateBuffer(
16865  allocator->GetCurrentFrameIndex(),
16866  *pBufferCreateInfo,
16867  *pAllocationCreateInfo,
16868  *pAllocation);
16869  }
16870 #endif
16871 
16872  if(res >= 0)
16873  {
16874  // 3. Bind buffer with memory.
16875  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16876  {
16877  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
16878  }
16879  if(res >= 0)
16880  {
16881  // All steps succeeded.
16882  #if VMA_STATS_STRING_ENABLED
16883  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16884  #endif
16885  if(pAllocationInfo != VMA_NULL)
16886  {
16887  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16888  }
16889 
16890  return VK_SUCCESS;
16891  }
16892  allocator->FreeMemory(
16893  1, // allocationCount
16894  pAllocation);
16895  *pAllocation = VK_NULL_HANDLE;
16896  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16897  *pBuffer = VK_NULL_HANDLE;
16898  return res;
16899  }
16900  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16901  *pBuffer = VK_NULL_HANDLE;
16902  return res;
16903  }
16904  return res;
16905 }
16906 
16907 void vmaDestroyBuffer(
16908  VmaAllocator allocator,
16909  VkBuffer buffer,
16910  VmaAllocation allocation)
16911 {
16912  VMA_ASSERT(allocator);
16913 
16914  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16915  {
16916  return;
16917  }
16918 
16919  VMA_DEBUG_LOG("vmaDestroyBuffer");
16920 
16921  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16922 
16923 #if VMA_RECORDING_ENABLED
16924  if(allocator->GetRecorder() != VMA_NULL)
16925  {
16926  allocator->GetRecorder()->RecordDestroyBuffer(
16927  allocator->GetCurrentFrameIndex(),
16928  allocation);
16929  }
16930 #endif
16931 
16932  if(buffer != VK_NULL_HANDLE)
16933  {
16934  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16935  }
16936 
16937  if(allocation != VK_NULL_HANDLE)
16938  {
16939  allocator->FreeMemory(
16940  1, // allocationCount
16941  &allocation);
16942  }
16943 }
16944 
16945 VkResult vmaCreateImage(
16946  VmaAllocator allocator,
16947  const VkImageCreateInfo* pImageCreateInfo,
16948  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16949  VkImage* pImage,
16950  VmaAllocation* pAllocation,
16951  VmaAllocationInfo* pAllocationInfo)
16952 {
16953  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16954 
16955  if(pImageCreateInfo->extent.width == 0 ||
16956  pImageCreateInfo->extent.height == 0 ||
16957  pImageCreateInfo->extent.depth == 0 ||
16958  pImageCreateInfo->mipLevels == 0 ||
16959  pImageCreateInfo->arrayLayers == 0)
16960  {
16961  return VK_ERROR_VALIDATION_FAILED_EXT;
16962  }
16963 
16964  VMA_DEBUG_LOG("vmaCreateImage");
16965 
16966  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16967 
16968  *pImage = VK_NULL_HANDLE;
16969  *pAllocation = VK_NULL_HANDLE;
16970 
16971  // 1. Create VkImage.
16972  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16973  allocator->m_hDevice,
16974  pImageCreateInfo,
16975  allocator->GetAllocationCallbacks(),
16976  pImage);
16977  if(res >= 0)
16978  {
16979  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16980  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16981  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16982 
16983  // 2. Allocate memory using allocator.
16984  VkMemoryRequirements vkMemReq = {};
16985  bool requiresDedicatedAllocation = false;
16986  bool prefersDedicatedAllocation = false;
16987  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16988  requiresDedicatedAllocation, prefersDedicatedAllocation);
16989 
16990  res = allocator->AllocateMemory(
16991  vkMemReq,
16992  requiresDedicatedAllocation,
16993  prefersDedicatedAllocation,
16994  VK_NULL_HANDLE, // dedicatedBuffer
16995  *pImage, // dedicatedImage
16996  *pAllocationCreateInfo,
16997  suballocType,
16998  1, // allocationCount
16999  pAllocation);
17000 
17001 #if VMA_RECORDING_ENABLED
17002  if(allocator->GetRecorder() != VMA_NULL)
17003  {
17004  allocator->GetRecorder()->RecordCreateImage(
17005  allocator->GetCurrentFrameIndex(),
17006  *pImageCreateInfo,
17007  *pAllocationCreateInfo,
17008  *pAllocation);
17009  }
17010 #endif
17011 
17012  if(res >= 0)
17013  {
17014  // 3. Bind image with memory.
17015  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17016  {
17017  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17018  }
17019  if(res >= 0)
17020  {
17021  // All steps succeeded.
17022  #if VMA_STATS_STRING_ENABLED
17023  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17024  #endif
17025  if(pAllocationInfo != VMA_NULL)
17026  {
17027  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17028  }
17029 
17030  return VK_SUCCESS;
17031  }
17032  allocator->FreeMemory(
17033  1, // allocationCount
17034  pAllocation);
17035  *pAllocation = VK_NULL_HANDLE;
17036  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17037  *pImage = VK_NULL_HANDLE;
17038  return res;
17039  }
17040  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17041  *pImage = VK_NULL_HANDLE;
17042  return res;
17043  }
17044  return res;
17045 }
17046 
17047 void vmaDestroyImage(
17048  VmaAllocator allocator,
17049  VkImage image,
17050  VmaAllocation allocation)
17051 {
17052  VMA_ASSERT(allocator);
17053 
17054  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17055  {
17056  return;
17057  }
17058 
17059  VMA_DEBUG_LOG("vmaDestroyImage");
17060 
17061  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17062 
17063 #if VMA_RECORDING_ENABLED
17064  if(allocator->GetRecorder() != VMA_NULL)
17065  {
17066  allocator->GetRecorder()->RecordDestroyImage(
17067  allocator->GetCurrentFrameIndex(),
17068  allocation);
17069  }
17070 #endif
17071 
17072  if(image != VK_NULL_HANDLE)
17073  {
17074  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17075  }
17076  if(allocation != VK_NULL_HANDLE)
17077  {
17078  allocator->FreeMemory(
17079  1, // allocationCount
17080  &allocation);
17081  }
17082 }
17083 
17084 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1810
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2114
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1872
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:2911
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1846
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2445
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1822
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2076
Definition: vk_mem_alloc.h:2180
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:2864
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1814
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2545
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1869
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2947
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2334
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1690
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2426
Definition: vk_mem_alloc.h:2151
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:2867
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1803
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2233
Definition: vk_mem_alloc.h:2103
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1881
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2362
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
Definition: vk_mem_alloc.h:1935
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1866
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2107
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2007
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1819
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:2901
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2006
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2951
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1898
VmaStatInfo total
Definition: vk_mem_alloc.h:2016
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2959
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2217
Definition: vk_mem_alloc.h:2175
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2942
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1820
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1733
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1875
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2376
Definition: vk_mem_alloc.h:2370
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1826
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1942
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2555
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1815
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1844
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2254
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2396
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2432
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1801
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2379
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2916
VmaMemoryUsage
Definition: vk_mem_alloc.h:2054
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2876
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2937
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:2955
Definition: vk_mem_alloc.h:2093
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2241
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1818
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2012
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1739
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2855
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2853
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2882
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1760
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1848
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1765
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2957
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2228
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2442
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1811
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1995
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2391
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1752
Definition: vk_mem_alloc.h:2366
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2158
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2008
Definition: vk_mem_alloc.h:1799
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1756
Definition: vk_mem_alloc.h:2191
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2382
Definition: vk_mem_alloc.h:2102
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1817
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2223
Definition: vk_mem_alloc.h:2214
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1998
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1813
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2404
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1884
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2435
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2212
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2906
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2247
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1923
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2014
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2138
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2007
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1824
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1854
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:2852
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2930
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1754
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1823
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2418
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1816
Definition: vk_mem_alloc.h:2169
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1862
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2569
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:1878
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2007
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2004
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2423
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2861
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
Definition: vk_mem_alloc.h:2184
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2550
Definition: vk_mem_alloc.h:2198
Definition: vk_mem_alloc.h:2210
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2953
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1809
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2002
Definition: vk_mem_alloc.h:2059
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2372
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1851
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2000
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1821
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1825
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2125
Definition: vk_mem_alloc.h:2205
Definition: vk_mem_alloc.h:2086
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2564
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1787
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1812
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2351
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2531
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2195
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2316
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2008
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1838
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2015
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2429
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2008
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:2921
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2536
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:2885