Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1598 /*
1599 Define this macro to 0/1 to disable/enable support for recording functionality,
1600 available through VmaAllocatorCreateInfo::pRecordSettings.
1601 */
1602 #ifndef VMA_RECORDING_ENABLED
1603  #ifdef _WIN32
1604  #define VMA_RECORDING_ENABLED 1
1605  #else
1606  #define VMA_RECORDING_ENABLED 0
1607  #endif
1608 #endif
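/* Example (editorial, not part of the original header): to force recording off
on every platform, define the macro before including this file:

    #define VMA_RECORDING_ENABLED 0
    #include "vk_mem_alloc.h"
*/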
1609 
1610 #ifndef NOMINMAX
1611  #define NOMINMAX // For windows.h
1612 #endif
1613 
1614 #ifndef VULKAN_H_
1615  #include <vulkan/vulkan.h>
1616 #endif
1617 
1618 #if VMA_RECORDING_ENABLED
1619  #include <windows.h>
1620 #endif
1621 
1622 #if !defined(VMA_DEDICATED_ALLOCATION)
1623  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1624  #define VMA_DEDICATED_ALLOCATION 1
1625  #else
1626  #define VMA_DEDICATED_ALLOCATION 0
1627  #endif
1628 #endif
1629 
1639 VK_DEFINE_HANDLE(VmaAllocator)
1640 
1641 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1643  VmaAllocator allocator,
1644  uint32_t memoryType,
1645  VkDeviceMemory memory,
1646  VkDeviceSize size);
1648 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1649  VmaAllocator allocator,
1650  uint32_t memoryType,
1651  VkDeviceMemory memory,
1652  VkDeviceSize size);
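/* Example (editorial sketch): callbacks matching the two typedefs above, useful
for logging every device memory allocation the library makes; printf is used
purely for illustration.

    static void VKAPI_CALL MyAllocateDeviceMemoryCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }

    static void VKAPI_CALL MyFreeDeviceMemoryCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Freed %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }

These would be plugged in through VmaDeviceMemoryCallbacks (pfnAllocate/pfnFree)
via VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
*/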
1653 
1658 typedef struct VmaDeviceMemoryCallbacks {
1660     PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
1662     PFN_vmaFreeDeviceMemoryFunction pfnFree;
1663 } VmaDeviceMemoryCallbacks;
1664 
1667 typedef enum VmaAllocatorCreateFlagBits {
1675     VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
1693     VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
1695     VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1696 } VmaAllocatorCreateFlagBits;
1697 
1700 typedef VkFlags VmaAllocatorCreateFlags;
1701 
1706 typedef struct VmaVulkanFunctions {
1707  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1708  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1709  PFN_vkAllocateMemory vkAllocateMemory;
1710  PFN_vkFreeMemory vkFreeMemory;
1711  PFN_vkMapMemory vkMapMemory;
1712  PFN_vkUnmapMemory vkUnmapMemory;
1713  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1714  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1715  PFN_vkBindBufferMemory vkBindBufferMemory;
1716  PFN_vkBindImageMemory vkBindImageMemory;
1717  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1718  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1719  PFN_vkCreateBuffer vkCreateBuffer;
1720  PFN_vkDestroyBuffer vkDestroyBuffer;
1721  PFN_vkCreateImage vkCreateImage;
1722  PFN_vkDestroyImage vkDestroyImage;
1723  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1724 #if VMA_DEDICATED_ALLOCATION
1725  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1726  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1727 #endif
1728 } VmaVulkanFunctions;
1729 
1731 typedef enum VmaRecordFlagBits {
1735     VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
1737     VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1738 } VmaRecordFlagBits;
1740 
1741 typedef VkFlags VmaRecordFlags;
1742 
1744 typedef struct VmaRecordSettings
1745 {
1750     VmaRecordFlags flags;
1755     const char* pFilePath;
1756 } VmaRecordSettings;
1757 
1759 typedef struct VmaAllocatorCreateInfo
1760 {
1763     VmaAllocatorCreateFlags flags;
1765     VkPhysicalDevice physicalDevice;
1768     VkDevice device;
1771     VkDeviceSize preferredLargeHeapBlockSize;
1774     const VkAllocationCallbacks* pAllocationCallbacks;
1777     const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
1806     uint32_t frameInUseCount;
1816     const VkDeviceSize* pHeapSizeLimit;
1825     const VmaVulkanFunctions* pVulkanFunctions;
1831     const VmaRecordSettings* pRecordSettings;
1832 } VmaAllocatorCreateInfo;
1837 
1839 VkResult vmaCreateAllocator(
1840  const VmaAllocatorCreateInfo* pCreateInfo,
1841  VmaAllocator* pAllocator);
1842 
1844 void vmaDestroyAllocator(
1845  VmaAllocator allocator);
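/* Example (editorial sketch): minimal allocator lifetime. Assumes `physicalDevice`
and `device` were created by the application beforehand.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers and images ...
    vmaDestroyAllocator(allocator);
*/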
1846 
1851 void vmaGetPhysicalDeviceProperties(
1852     VmaAllocator allocator,
1853  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1854 
1859 void vmaGetMemoryProperties(
1860     VmaAllocator allocator,
1861  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1862 
1869 void vmaGetMemoryTypeProperties(
1870     VmaAllocator allocator,
1871  uint32_t memoryTypeIndex,
1872  VkMemoryPropertyFlags* pFlags);
1873 
1882 void vmaSetCurrentFrameIndex(
1883     VmaAllocator allocator,
1884  uint32_t frameIndex);
1885 
1888 typedef struct VmaStatInfo
1889 {
1891     uint32_t blockCount;
1893     uint32_t allocationCount;
1895     uint32_t unusedRangeCount;
1897     VkDeviceSize usedBytes;
1899     VkDeviceSize unusedBytes;
1900     VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1901     VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1902 } VmaStatInfo;
1903 
1905 typedef struct VmaStats
1906 {
1907  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1908  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1909     VmaStatInfo total;
1910 } VmaStats;
1911 
1913 void vmaCalculateStats(
1914  VmaAllocator allocator,
1915  VmaStats* pStats);
1916 
1917 #define VMA_STATS_STRING_ENABLED 1
1918 
1919 #if VMA_STATS_STRING_ENABLED
1920 
1922 
1924 void vmaBuildStatsString(
1925  VmaAllocator allocator,
1926  char** ppStatsString,
1927  VkBool32 detailedMap);
1928 
1929 void vmaFreeStatsString(
1930  VmaAllocator allocator,
1931  char* pStatsString);
1932 
1933 #endif // #if VMA_STATS_STRING_ENABLED
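/* Example (editorial sketch): dumping the whole allocator state as a JSON string,
e.g. for logging. The returned string must be released with vmaFreeStatsString.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    // ... write statsString to a file ...
    vmaFreeStatsString(allocator, statsString);
*/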
1934 
1943 VK_DEFINE_HANDLE(VmaPool)
1944 
1945 typedef enum VmaMemoryUsage
1946 {
1958     VMA_MEMORY_USAGE_UNKNOWN = 0,
1969     VMA_MEMORY_USAGE_GPU_ONLY = 1,
1979     VMA_MEMORY_USAGE_CPU_ONLY = 2,
1986     VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
1992     VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
1994     VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
1995 } VmaMemoryUsage;
1996 
1997 
2011 typedef enum VmaAllocationCreateFlagBits {
2018     VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
2024     VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
2031     VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
2044     VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
2050     VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
2056     VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
2062     VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
2066     VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
2070     VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
2074     VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
2077     VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
2079     VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2081     VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
2086     VMA_ALLOCATION_CREATE_STRATEGY_MASK =
2087         VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
2088         VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
2089         VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2092     VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2093 } VmaAllocationCreateFlagBits;
2094 
2099 typedef VkFlags VmaAllocationCreateFlags;
2100 
2104 typedef struct VmaAllocationCreateInfo
2105 {
2110     VmaAllocationCreateFlags flags;
2115     VmaMemoryUsage usage;
2118  VkMemoryPropertyFlags requiredFlags;
2123  VkMemoryPropertyFlags preferredFlags;
2131  uint32_t memoryTypeBits;
2137     VmaPool pool;
2144     void* pUserData;
2145 } VmaAllocationCreateInfo;
2146 
2163 VkResult vmaFindMemoryTypeIndex(
2164  VmaAllocator allocator,
2165  uint32_t memoryTypeBits,
2166  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2167  uint32_t* pMemoryTypeIndex);
2168 
2182  VmaAllocator allocator,
2183  const VkBufferCreateInfo* pBufferCreateInfo,
2184  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2185  uint32_t* pMemoryTypeIndex);
2186 
2200  VmaAllocator allocator,
2201  const VkImageCreateInfo* pImageCreateInfo,
2202  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2203  uint32_t* pMemoryTypeIndex);
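/* Example (editorial sketch; `allocator` is assumed to exist): finding the memory
type index that would be chosen for a host-visible uniform buffer.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/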
2204 
2206 typedef enum VmaPoolCreateFlagBits {
2225     VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2242     VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
2253     VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
2256     VMA_POOL_CREATE_ALGORITHM_MASK =
2257         VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
2258         VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
2259     VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2260 } VmaPoolCreateFlagBits;
2261 
2262 typedef VkFlags VmaPoolCreateFlags;
2263 
2266 typedef struct VmaPoolCreateInfo {
2269     uint32_t memoryTypeIndex;
2272     VmaPoolCreateFlags flags;
2281     VkDeviceSize blockSize;
2288     size_t minBlockCount;
2296     size_t maxBlockCount;
2308     uint32_t frameInUseCount;
2309 } VmaPoolCreateInfo;
2310 
2313 typedef struct VmaPoolStats {
2316  VkDeviceSize size;
2319  VkDeviceSize unusedSize;
2322     size_t allocationCount;
2325     size_t unusedRangeCount;
2332     VkDeviceSize unusedRangeSizeMax;
2335  size_t blockCount;
2336 } VmaPoolStats;
2337 
2344 VkResult vmaCreatePool(
2345  VmaAllocator allocator,
2346  const VmaPoolCreateInfo* pCreateInfo,
2347  VmaPool* pPool);
2348 
2351 void vmaDestroyPool(
2352  VmaAllocator allocator,
2353  VmaPool pool);
2354 
2361 void vmaGetPoolStats(
2362  VmaAllocator allocator,
2363  VmaPool pool,
2364  VmaPoolStats* pPoolStats);
2365 
2372 void vmaMakePoolAllocationsLost(
2373     VmaAllocator allocator,
2374  VmaPool pool,
2375  size_t* pLostAllocationCount);
2376 
2391 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
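/* Example (editorial sketch; `memTypeIndex` would come from vmaFindMemoryTypeIndex):
creating a custom pool with fixed-size blocks, then releasing it.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... set VmaAllocationCreateInfo::pool to allocate from it ...
    vmaDestroyPool(allocator, pool);
*/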
2392 
2417 VK_DEFINE_HANDLE(VmaAllocation)
2418 
2419 
2421 typedef struct VmaAllocationInfo {
2426  uint32_t memoryType;
2435  VkDeviceMemory deviceMemory;
2440  VkDeviceSize offset;
2445  VkDeviceSize size;
2452     void* pMappedData;
2459     void* pUserData;
2460 } VmaAllocationInfo;
2461 
2472 VkResult vmaAllocateMemory(
2473  VmaAllocator allocator,
2474  const VkMemoryRequirements* pVkMemoryRequirements,
2475  const VmaAllocationCreateInfo* pCreateInfo,
2476  VmaAllocation* pAllocation,
2477  VmaAllocationInfo* pAllocationInfo);
2478 
2485 VkResult vmaAllocateMemoryForBuffer(
2486     VmaAllocator allocator,
2487  VkBuffer buffer,
2488  const VmaAllocationCreateInfo* pCreateInfo,
2489  VmaAllocation* pAllocation,
2490  VmaAllocationInfo* pAllocationInfo);
2491 
2493 VkResult vmaAllocateMemoryForImage(
2494  VmaAllocator allocator,
2495  VkImage image,
2496  const VmaAllocationCreateInfo* pCreateInfo,
2497  VmaAllocation* pAllocation,
2498  VmaAllocationInfo* pAllocationInfo);
2499 
2501 void vmaFreeMemory(
2502  VmaAllocator allocator,
2503  VmaAllocation allocation);
2504 
2525 VkResult vmaResizeAllocation(
2526  VmaAllocator allocator,
2527  VmaAllocation allocation,
2528  VkDeviceSize newSize);
2529 
2546 void vmaGetAllocationInfo(
2547     VmaAllocator allocator,
2548  VmaAllocation allocation,
2549  VmaAllocationInfo* pAllocationInfo);
2550 
2565 VkBool32 vmaTouchAllocation(
2566  VmaAllocator allocator,
2567  VmaAllocation allocation);
2568 
2582 void vmaSetAllocationUserData(
2583     VmaAllocator allocator,
2584  VmaAllocation allocation,
2585  void* pUserData);
2586 
2597 void vmaCreateLostAllocation(
2598     VmaAllocator allocator,
2599  VmaAllocation* pAllocation);
2600 
2635 VkResult vmaMapMemory(
2636  VmaAllocator allocator,
2637  VmaAllocation allocation,
2638  void** ppData);
2639 
2644 void vmaUnmapMemory(
2645  VmaAllocator allocator,
2646  VmaAllocation allocation);
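/* Example (editorial sketch; `srcData`/`srcDataSize` are hypothetical): uploading
data through a temporary mapping. The allocation must live in host-visible memory,
e.g. created with VMA_MEMORY_USAGE_CPU_ONLY or VMA_MEMORY_USAGE_CPU_TO_GPU.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/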
2647 
2660 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2661 
2674 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2675 
2692 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2693 
2700 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2701 
2702 typedef enum VmaDefragmentationFlagBits {
2704     VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2705 } VmaDefragmentationFlagBits;
2706 typedef VkFlags VmaDefragmentationFlags;
2707 
2712 typedef struct VmaDefragmentationInfo2 {
2716     VmaDefragmentationFlags flags;
2721     uint32_t allocationCount;
2728     VmaAllocation* pAllocations;
2734     VkBool32* pAllocationsChanged;
2736     uint32_t poolCount;
2747     VmaPool* pPools;
2757     VkDeviceSize maxCpuBytesToMove;
2762     uint32_t maxCpuAllocationsToMove;
2767     VkDeviceSize maxGpuBytesToMove;
2772     uint32_t maxGpuAllocationsToMove;
2781     VkCommandBuffer commandBuffer;
2782 } VmaDefragmentationInfo2;
2783 
2788 typedef struct VmaDefragmentationInfo {
2793     VkDeviceSize maxBytesToMove;
2797     uint32_t maxAllocationsToMove;
2799 } VmaDefragmentationInfo;
2800 
2802 typedef struct VmaDefragmentationStats {
2804  VkDeviceSize bytesMoved;
2806     VkDeviceSize bytesFreed;
2808     uint32_t allocationsMoved;
2810     uint32_t deviceMemoryBlocksFreed;
2811 } VmaDefragmentationStats;
2812 
2839 VkResult vmaDefragmentationBegin(
2840  VmaAllocator allocator,
2841  const VmaDefragmentationInfo2* pInfo,
2842  VmaDefragmentationStats* pStats,
2843  VmaDefragmentationContext *pContext);
2844 
2850 VkResult vmaDefragmentationEnd(
2851  VmaAllocator allocator,
2852  VmaDefragmentationContext context);
2853 
2894 VkResult vmaDefragment(
2895  VmaAllocator allocator,
2896  VmaAllocation* pAllocations,
2897  size_t allocationCount,
2898  VkBool32* pAllocationsChanged,
2899  const VmaDefragmentationInfo *pDefragmentationInfo,
2900  VmaDefragmentationStats* pDefragmentationStats);
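/* Example (editorial sketch; `alloc0`/`alloc1` are hypothetical allocations made
earlier): CPU-side defragmentation with the begin/end API declared above. With
no command buffer provided, only CPU-side moves are performed.

    VmaAllocation allocs[] = { alloc0, alloc1 };

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = 2;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/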
2901 
2914 VkResult vmaBindBufferMemory(
2915  VmaAllocator allocator,
2916  VmaAllocation allocation,
2917  VkBuffer buffer);
2918 
2931 VkResult vmaBindImageMemory(
2932  VmaAllocator allocator,
2933  VmaAllocation allocation,
2934  VkImage image);
2935 
2962 VkResult vmaCreateBuffer(
2963  VmaAllocator allocator,
2964  const VkBufferCreateInfo* pBufferCreateInfo,
2965  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2966  VkBuffer* pBuffer,
2967  VmaAllocation* pAllocation,
2968  VmaAllocationInfo* pAllocationInfo);
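/* Example (editorial sketch): the typical use of this library - creating a buffer
together with its backing memory in a single call.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo,
        &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/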
2969 
2981 void vmaDestroyBuffer(
2982  VmaAllocator allocator,
2983  VkBuffer buffer,
2984  VmaAllocation allocation);
2985 
2987 VkResult vmaCreateImage(
2988  VmaAllocator allocator,
2989  const VkImageCreateInfo* pImageCreateInfo,
2990  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2991  VkImage* pImage,
2992  VmaAllocation* pAllocation,
2993  VmaAllocationInfo* pAllocationInfo);
2994 
3006 void vmaDestroyImage(
3007  VmaAllocator allocator,
3008  VkImage image,
3009  VmaAllocation allocation);
3010 
3011 #ifdef __cplusplus
3012 }
3013 #endif
3014 
3015 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3016 
3017 // For Visual Studio IntelliSense.
3018 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3019 #define VMA_IMPLEMENTATION
3020 #endif
3021 
3022 #ifdef VMA_IMPLEMENTATION
3023 #undef VMA_IMPLEMENTATION
3024 
3025 #include <cstdint>
3026 #include <cstdlib>
3027 #include <cstring>
3028 #include <cstdio> // for snprintf (used when VMA_STATS_STRING_ENABLED is 1)
3029 /*******************************************************************************
3030 CONFIGURATION SECTION
3031 
3032 Define some of these macros before each #include of this header or change them
3033 here if you need behavior other than the default, depending on your environment.
3034 */
3035 
3036 /*
3037 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3038 internally, like:
3039 
3040  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3041 
3042 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3043 VmaAllocatorCreateInfo::pVulkanFunctions.
3044 */
3045 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3046 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3047 #endif
3048 
3049 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3050 //#define VMA_USE_STL_CONTAINERS 1
3051 
3052 /* Set this macro to 1 to make the library include and use STL containers:
3053 std::pair, std::vector, std::list, std::unordered_map.
3054 
3055 Set it to 0 or leave it undefined to make the library use its own implementations
3056 of these containers.
3057 */
3058 #if VMA_USE_STL_CONTAINERS
3059  #define VMA_USE_STL_VECTOR 1
3060  #define VMA_USE_STL_UNORDERED_MAP 1
3061  #define VMA_USE_STL_LIST 1
3062 #endif
3063 
3064 #ifndef VMA_USE_STL_SHARED_MUTEX
3065  // Minimum Visual Studio 2015 Update 2
3066  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3067  #define VMA_USE_STL_SHARED_MUTEX 1
3068  #endif
3069 #endif
3070 
3071 #if VMA_USE_STL_VECTOR
3072  #include <vector>
3073 #endif
3074 
3075 #if VMA_USE_STL_UNORDERED_MAP
3076  #include <unordered_map>
3077 #endif
3078 
3079 #if VMA_USE_STL_LIST
3080  #include <list>
3081 #endif
3082 
3083 /*
3084 The following headers are used only in this CONFIGURATION section, so feel free
3085 to remove them if they are not needed.
3086 */
3087 #include <cassert> // for assert
3088 #include <algorithm> // for min, max
3089 #include <mutex>
3090 #include <atomic> // for std::atomic
3091 
3092 #ifndef VMA_NULL
3093  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3094  #define VMA_NULL nullptr
3095 #endif
3096 
3097 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3098 #include <cstdlib>
3099 void *aligned_alloc(size_t alignment, size_t size)
3100 {
3101  // alignment must be >= sizeof(void*)
3102  if(alignment < sizeof(void*))
3103  {
3104  alignment = sizeof(void*);
3105  }
3106 
3107  return memalign(alignment, size);
3108 }
3109 #elif defined(__APPLE__) || defined(__ANDROID__)
3110 #include <cstdlib>
3111 void *aligned_alloc(size_t alignment, size_t size)
3112 {
3113  // alignment must be >= sizeof(void*)
3114  if(alignment < sizeof(void*))
3115  {
3116  alignment = sizeof(void*);
3117  }
3118 
3119  void *pointer;
3120  if(posix_memalign(&pointer, alignment, size) == 0)
3121  return pointer;
3122  return VMA_NULL;
3123 }
3124 #endif
3125 
3126 // If your compiler is not compatible with C++11 and the definition of the
3127 // aligned_alloc() function is missing, uncommenting the following line may help:
3128 
3129 //#include <malloc.h>
3130 
3131 // Normal assert to check for programmer's errors, especially in Debug configuration.
3132 #ifndef VMA_ASSERT
3133  #ifdef _DEBUG
3134  #define VMA_ASSERT(expr) assert(expr)
3135  #else
3136  #define VMA_ASSERT(expr)
3137  #endif
3138 #endif
3139 
3140 // Assert called very often, e.g. inside data structures like operator[].
3141 // Making it non-empty can noticeably slow the program down.
3142 #ifndef VMA_HEAVY_ASSERT
3143  #ifdef _DEBUG
3144  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3145  #else
3146  #define VMA_HEAVY_ASSERT(expr)
3147  #endif
3148 #endif
3149 
3150 #ifndef VMA_ALIGN_OF
3151  #define VMA_ALIGN_OF(type) (__alignof(type))
3152 #endif
3153 
3154 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3155  #if defined(_WIN32)
3156  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3157  #else
3158  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3159  #endif
3160 #endif
3161 
3162 #ifndef VMA_SYSTEM_FREE
3163  #if defined(_WIN32)
3164  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3165  #else
3166  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3167  #endif
3168 #endif
3169 
3170 #ifndef VMA_MIN
3171  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3172 #endif
3173 
3174 #ifndef VMA_MAX
3175  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3176 #endif
3177 
3178 #ifndef VMA_SWAP
3179  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3180 #endif
3181 
3182 #ifndef VMA_SORT
3183  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3184 #endif
3185 
3186 #ifndef VMA_DEBUG_LOG
3187  #define VMA_DEBUG_LOG(format, ...)
3188  /*
3189  #define VMA_DEBUG_LOG(format, ...) do { \
3190  printf(format, __VA_ARGS__); \
3191  printf("\n"); \
3192  } while(false)
3193  */
3194 #endif
3195 
3196 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3197 #if VMA_STATS_STRING_ENABLED
3198  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3199  {
3200  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3201  }
3202  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3203  {
3204  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3205  }
3206  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3207  {
3208  snprintf(outStr, strLen, "%p", ptr);
3209  }
3210 #endif
3211 
3212 #ifndef VMA_MUTEX
3213  class VmaMutex
3214  {
3215  public:
3216  void Lock() { m_Mutex.lock(); }
3217  void Unlock() { m_Mutex.unlock(); }
3218  private:
3219  std::mutex m_Mutex;
3220  };
3221  #define VMA_MUTEX VmaMutex
3222 #endif
3223 
3224 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3225 #ifndef VMA_RW_MUTEX
3226  #if VMA_USE_STL_SHARED_MUTEX
3227  // Use std::shared_mutex from C++17.
3228  #include <shared_mutex>
3229  class VmaRWMutex
3230  {
3231  public:
3232  void LockRead() { m_Mutex.lock_shared(); }
3233  void UnlockRead() { m_Mutex.unlock_shared(); }
3234  void LockWrite() { m_Mutex.lock(); }
3235  void UnlockWrite() { m_Mutex.unlock(); }
3236  private:
3237  std::shared_mutex m_Mutex;
3238  };
3239  #define VMA_RW_MUTEX VmaRWMutex
3240  #elif defined(_WIN32)
3241  // Use SRWLOCK from WinAPI.
3242  class VmaRWMutex
3243  {
3244  public:
3245  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3246  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3247  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3248  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3249  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3250  private:
3251  SRWLOCK m_Lock;
3252  };
3253  #define VMA_RW_MUTEX VmaRWMutex
3254  #else
3255  // Less efficient fallback: Use normal mutex.
3256  class VmaRWMutex
3257  {
3258  public:
3259  void LockRead() { m_Mutex.Lock(); }
3260  void UnlockRead() { m_Mutex.Unlock(); }
3261  void LockWrite() { m_Mutex.Lock(); }
3262  void UnlockWrite() { m_Mutex.Unlock(); }
3263  private:
3264  VMA_MUTEX m_Mutex;
3265  };
3266  #define VMA_RW_MUTEX VmaRWMutex
3267  #endif // #if VMA_USE_STL_SHARED_MUTEX
3268 #endif // #ifndef VMA_RW_MUTEX
3269 
3270 /*
3271 If providing your own implementation, you need to implement a subset of std::atomic:
3272 
3273 - Constructor(uint32_t desired)
3274 - uint32_t load() const
3275 - void store(uint32_t desired)
3276 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3277 */
3278 #ifndef VMA_ATOMIC_UINT32
3279  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3280 #endif
3281 
3282 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3283 
3287  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3288 #endif
3289 
3290 #ifndef VMA_DEBUG_ALIGNMENT
3291 
3295  #define VMA_DEBUG_ALIGNMENT (1)
3296 #endif
3297 
3298 #ifndef VMA_DEBUG_MARGIN
3299 
3303  #define VMA_DEBUG_MARGIN (0)
3304 #endif
3305 
3306 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3307 
3311  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3312 #endif
3313 
3314 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3315 
3320  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3321 #endif
3322 
3323 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3324 
3328  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3329 #endif
3330 
3331 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3332 
3336  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3337 #endif
3338 
3339 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3340  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3342 #endif
3343 
3344 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3345  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3347 #endif
3348 
3349 #ifndef VMA_CLASS_NO_COPY
3350  #define VMA_CLASS_NO_COPY(className) \
3351  private: \
3352  className(const className&) = delete; \
3353  className& operator=(const className&) = delete;
3354 #endif
3355 
3356 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3357 
3358 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3359 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3360 
3361 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3362 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3363 
3364 /*******************************************************************************
3365 END OF CONFIGURATION
3366 */
3367 
3368 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3369 
3370 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3371  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3372 
3373 // Returns number of bits set to 1 in (v).
3374 static inline uint32_t VmaCountBitsSet(uint32_t v)
3375 {
3376  uint32_t c = v - ((v >> 1) & 0x55555555);
3377  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3378  c = ((c >> 4) + c) & 0x0F0F0F0F;
3379  c = ((c >> 8) + c) & 0x00FF00FF;
3380  c = ((c >> 16) + c) & 0x0000FFFF;
3381  return c;
3382 }
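// Editorial note: this is a branch-free SWAR popcount - it sums adjacent bit
// pairs, then nibbles, bytes and half-words. For example, VmaCountBitsSet(0x2A)
// returns 3, because 0x2A is binary 101010.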
3383 
3384 // Aligns the given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
3385 // Use types like uint32_t, uint64_t as T.
3386 template <typename T>
3387 static inline T VmaAlignUp(T val, T align)
3388 {
3389  return (val + align - 1) / align * align;
3390 }
3391 // Aligns the given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
3392 // Use types like uint32_t, uint64_t as T.
3393 template <typename T>
3394 static inline T VmaAlignDown(T val, T align)
3395 {
3396  return val / align * align;
3397 }
3398 
3399 // Division with mathematical rounding to nearest number.
3400 template <typename T>
3401 static inline T VmaRoundDiv(T x, T y)
3402 {
3403  return (x + (y / (T)2)) / y;
3404 }
3405 
3406 /*
3407 Returns true if the given number is a power of two.
3408 T must be an unsigned integer, or a signed integer whose value is nonnegative.
3409 Returns true for 0 as well.
3410 */
3411 template <typename T>
3412 inline bool VmaIsPow2(T x)
3413 {
3414  return (x & (x-1)) == 0;
3415 }
3416 
3417 // Returns the smallest power of 2 greater than or equal to v.
3418 static inline uint32_t VmaNextPow2(uint32_t v)
3419 {
3420  v--;
3421  v |= v >> 1;
3422  v |= v >> 2;
3423  v |= v >> 4;
3424  v |= v >> 8;
3425  v |= v >> 16;
3426  v++;
3427  return v;
3428 }
3429 static inline uint64_t VmaNextPow2(uint64_t v)
3430 {
3431  v--;
3432  v |= v >> 1;
3433  v |= v >> 2;
3434  v |= v >> 4;
3435  v |= v >> 8;
3436  v |= v >> 16;
3437  v |= v >> 32;
3438  v++;
3439  return v;
3440 }
3441 
3442 // Returns the largest power of 2 less than or equal to v.
3443 static inline uint32_t VmaPrevPow2(uint32_t v)
3444 {
3445  v |= v >> 1;
3446  v |= v >> 2;
3447  v |= v >> 4;
3448  v |= v >> 8;
3449  v |= v >> 16;
3450  v = v ^ (v >> 1);
3451  return v;
3452 }
3453 static inline uint64_t VmaPrevPow2(uint64_t v)
3454 {
3455  v |= v >> 1;
3456  v |= v >> 2;
3457  v |= v >> 4;
3458  v |= v >> 8;
3459  v |= v >> 16;
3460  v |= v >> 32;
3461  v = v ^ (v >> 1);
3462  return v;
3463 }
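// Editorial note: both helpers smear the highest set bit into every lower
// position with the shift-or cascade above. For example, VmaNextPow2(17u) == 32
// and VmaPrevPow2(17u) == 16; power-of-2 inputs are returned unchanged.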
3464 
3465 static inline bool VmaStrIsEmpty(const char* pStr)
3466 {
3467  return pStr == VMA_NULL || *pStr == '\0';
3468 }
3469 
3470 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3471 {
3472  switch(algorithm)
3473     {
3474     case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3475         return "Linear";
3476     case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3477         return "Buddy";
3478  case 0:
3479  return "Default";
3480  default:
3481  VMA_ASSERT(0);
3482  return "";
3483  }
3484 }
3485 
3486 #ifndef VMA_SORT
3487 
3488 template<typename Iterator, typename Compare>
3489 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3490 {
3491  Iterator centerValue = end; --centerValue;
3492  Iterator insertIndex = beg;
3493  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3494  {
3495  if(cmp(*memTypeIndex, *centerValue))
3496  {
3497  if(insertIndex != memTypeIndex)
3498  {
3499  VMA_SWAP(*memTypeIndex, *insertIndex);
3500  }
3501  ++insertIndex;
3502  }
3503  }
3504  if(insertIndex != centerValue)
3505  {
3506  VMA_SWAP(*insertIndex, *centerValue);
3507  }
3508  return insertIndex;
3509 }
3510 
3511 template<typename Iterator, typename Compare>
3512 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3513 {
3514  if(beg < end)
3515  {
3516  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3517  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3518  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3519  }
3520 }
3521 
3522 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3523 
3524 #endif // #ifndef VMA_SORT
3525 
3526 /*
3527 Returns true if two memory blocks occupy overlapping pages.
3528 ResourceA must be at a lower memory offset than ResourceB.
3529 
3530 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3531 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3532 */
3533 static inline bool VmaBlocksOnSamePage(
3534  VkDeviceSize resourceAOffset,
3535  VkDeviceSize resourceASize,
3536  VkDeviceSize resourceBOffset,
3537  VkDeviceSize pageSize)
3538 {
3539  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3540  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3541  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3542  VkDeviceSize resourceBStart = resourceBOffset;
3543  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3544  return resourceAEndPage == resourceBStartPage;
3545 }
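// Worked example (editorial): with pageSize = 1024, a resource at offset 0 and
// size 512 ends on page 0, and a resource starting at offset 1024 starts on
// page 1, so VmaBlocksOnSamePage(0, 512, 1024, 1024) == false. Had the second
// resource started at offset 768, both would touch page 0 and the result
// would be true.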
3546 
3547 enum VmaSuballocationType
3548 {
3549  VMA_SUBALLOCATION_TYPE_FREE = 0,
3550  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3551  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3552  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3553  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3554  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3555  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3556 };
3557 
3558 /*
3559 Returns true if the given suballocation types could conflict and must respect
3560 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3561 or linear image and the other is an optimal image. If a type is unknown, the
3562 function behaves conservatively.
3563 */
3564 static inline bool VmaIsBufferImageGranularityConflict(
3565  VmaSuballocationType suballocType1,
3566  VmaSuballocationType suballocType2)
3567 {
3568  if(suballocType1 > suballocType2)
3569  {
3570  VMA_SWAP(suballocType1, suballocType2);
3571  }
3572 
3573  switch(suballocType1)
3574  {
3575  case VMA_SUBALLOCATION_TYPE_FREE:
3576  return false;
3577  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3578  return true;
3579  case VMA_SUBALLOCATION_TYPE_BUFFER:
3580  return
3581  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3582  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3583  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3584  return
3585  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3586  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3587  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3588  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3589  return
3590  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3591  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3592  return false;
3593  default:
3594  VMA_ASSERT(0);
3595  return true;
3596  }
3597 }
3598 
3599 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3600 {
3601  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3602  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3603  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3604  {
3605  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3606  }
3607 }
3608 
3609 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3610 {
3611  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3612  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3613  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3614  {
3615  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3616  {
3617  return false;
3618  }
3619  }
3620  return true;
3621 }
3622 
3623 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3624 struct VmaMutexLock
3625 {
3626  VMA_CLASS_NO_COPY(VmaMutexLock)
3627 public:
3628  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3629  m_pMutex(useMutex ? &mutex : VMA_NULL)
3630  { if(m_pMutex) { m_pMutex->Lock(); } }
3631  ~VmaMutexLock()
3632  { if(m_pMutex) { m_pMutex->Unlock(); } }
3633 private:
3634  VMA_MUTEX* m_pMutex;
3635 };
3636 
3637 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3638 struct VmaMutexLockRead
3639 {
3640  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3641 public:
3642  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3643  m_pMutex(useMutex ? &mutex : VMA_NULL)
3644  { if(m_pMutex) { m_pMutex->LockRead(); } }
3645  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3646 private:
3647  VMA_RW_MUTEX* m_pMutex;
3648 };
3649 
3650 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3651 struct VmaMutexLockWrite
3652 {
3653  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3654 public:
3655  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3656  m_pMutex(useMutex ? &mutex : VMA_NULL)
3657  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3658  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3659 private:
3660  VMA_RW_MUTEX* m_pMutex;
3661 };
3662 
3663 #if VMA_DEBUG_GLOBAL_MUTEX
3664  static VMA_MUTEX gDebugGlobalMutex;
3665  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3666 #else
3667  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3668 #endif
3669 
3670 // Minimum size of a free suballocation to register it in the free suballocation collection.
3671 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3672 
3673 /*
3674 Performs a binary search and returns the iterator to the first element that is
3675 greater than or equal to (key), according to the comparison (cmp).
3676 
3677 Cmp should return true if the first argument is less than the second argument.
3678 
3679 The returned iterator points to the found element, if it is present in the
3680 collection, or to the place where a new element with value (key) should be inserted.
3681 */
3682 template <typename CmpLess, typename IterT, typename KeyT>
3683 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3684 {
3685  size_t down = 0, up = (end - beg);
3686  while(down < up)
3687  {
3688  const size_t mid = (down + up) / 2;
3689  if(cmp(*(beg+mid), key))
3690  {
3691  down = mid + 1;
3692  }
3693  else
3694  {
3695  up = mid;
3696  }
3697  }
3698  return beg + down;
3699 }
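/* Example (editorial sketch): finding the insertion point for the value 7 in a
sorted array.

    const int arr[] = { 1, 3, 5, 9 };
    const int* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 7, [](int a, int b) { return a < b; });
    // it now points to arr[3] (value 9), the first element not less than 7.
*/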
3700 
3701 ////////////////////////////////////////////////////////////////////////////////
3702 // Memory allocation
3703 
3704 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3705 {
3706  if((pAllocationCallbacks != VMA_NULL) &&
3707  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3708  {
3709  return (*pAllocationCallbacks->pfnAllocation)(
3710  pAllocationCallbacks->pUserData,
3711  size,
3712  alignment,
3713  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3714  }
3715  else
3716  {
3717  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3718  }
3719 }
3720 
3721 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3722 {
3723  if((pAllocationCallbacks != VMA_NULL) &&
3724  (pAllocationCallbacks->pfnFree != VMA_NULL))
3725  {
3726  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3727  }
3728  else
3729  {
3730  VMA_SYSTEM_FREE(ptr);
3731  }
3732 }
3733 
3734 template<typename T>
3735 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3736 {
3737  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3738 }
3739 
3740 template<typename T>
3741 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3742 {
3743  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3744 }
3745 
3746 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3747 
3748 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3749 
3750 template<typename T>
3751 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3752 {
3753  ptr->~T();
3754  VmaFree(pAllocationCallbacks, ptr);
3755 }
3756 
3757 template<typename T>
3758 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3759 {
3760  if(ptr != VMA_NULL)
3761  {
3762  for(size_t i = count; i--; )
3763  {
3764  ptr[i].~T();
3765  }
3766  VmaFree(pAllocationCallbacks, ptr);
3767  }
3768 }
3769 
3770 // STL-compatible allocator.
3771 template<typename T>
3772 class VmaStlAllocator
3773 {
3774 public:
3775  const VkAllocationCallbacks* const m_pCallbacks;
3776  typedef T value_type;
3777 
3778  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3779  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3780 
3781  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3782  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3783 
3784  template<typename U>
3785  bool operator==(const VmaStlAllocator<U>& rhs) const
3786  {
3787  return m_pCallbacks == rhs.m_pCallbacks;
3788  }
3789  template<typename U>
3790  bool operator!=(const VmaStlAllocator<U>& rhs) const
3791  {
3792  return m_pCallbacks != rhs.m_pCallbacks;
3793  }
3794 
3795  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3796 };
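/* Example (editorial sketch): VmaStlAllocator routes any STL-compatible
container's memory through the user-provided VkAllocationCallbacks, e.g.:

    VmaStlAllocator<int> alloc(pAllocationCallbacks);
    std::vector<int, VmaStlAllocator<int> > v(alloc);
    v.push_back(42); // memory comes from pfnAllocation, if one was provided
*/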
3797 
3798 #if VMA_USE_STL_VECTOR
3799 
3800 #define VmaVector std::vector
3801 
3802 template<typename T, typename allocatorT>
3803 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3804 {
3805  vec.insert(vec.begin() + index, item);
3806 }
3807 
3808 template<typename T, typename allocatorT>
3809 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3810 {
3811  vec.erase(vec.begin() + index);
3812 }
3813 
3814 #else // #if VMA_USE_STL_VECTOR
3815 
3816 /* Class with interface compatible with subset of std::vector.
3817 T must be POD because constructors and destructors are not called and memcpy is
3818 used for these objects. */
3819 template<typename T, typename AllocatorT>
3820 class VmaVector
3821 {
3822 public:
3823  typedef T value_type;
3824 
3825  VmaVector(const AllocatorT& allocator) :
3826  m_Allocator(allocator),
3827  m_pArray(VMA_NULL),
3828  m_Count(0),
3829  m_Capacity(0)
3830  {
3831  }
3832 
3833  VmaVector(size_t count, const AllocatorT& allocator) :
3834  m_Allocator(allocator),
3835  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3836  m_Count(count),
3837  m_Capacity(count)
3838  {
3839  }
3840 
3841  VmaVector(const VmaVector<T, AllocatorT>& src) :
3842  m_Allocator(src.m_Allocator),
3843  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3844  m_Count(src.m_Count),
3845  m_Capacity(src.m_Count)
3846  {
3847  if(m_Count != 0)
3848  {
3849  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3850  }
3851  }
3852 
3853  ~VmaVector()
3854  {
3855  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3856  }
3857 
3858  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3859  {
3860  if(&rhs != this)
3861  {
3862  resize(rhs.m_Count);
3863  if(m_Count != 0)
3864  {
3865  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3866  }
3867  }
3868  return *this;
3869  }
3870 
3871  bool empty() const { return m_Count == 0; }
3872  size_t size() const { return m_Count; }
3873  T* data() { return m_pArray; }
3874  const T* data() const { return m_pArray; }
3875 
3876  T& operator[](size_t index)
3877  {
3878  VMA_HEAVY_ASSERT(index < m_Count);
3879  return m_pArray[index];
3880  }
3881  const T& operator[](size_t index) const
3882  {
3883  VMA_HEAVY_ASSERT(index < m_Count);
3884  return m_pArray[index];
3885  }
3886 
3887  T& front()
3888  {
3889  VMA_HEAVY_ASSERT(m_Count > 0);
3890  return m_pArray[0];
3891  }
3892  const T& front() const
3893  {
3894  VMA_HEAVY_ASSERT(m_Count > 0);
3895  return m_pArray[0];
3896  }
3897  T& back()
3898  {
3899  VMA_HEAVY_ASSERT(m_Count > 0);
3900  return m_pArray[m_Count - 1];
3901  }
3902  const T& back() const
3903  {
3904  VMA_HEAVY_ASSERT(m_Count > 0);
3905  return m_pArray[m_Count - 1];
3906  }
3907 
3908  void reserve(size_t newCapacity, bool freeMemory = false)
3909  {
3910  newCapacity = VMA_MAX(newCapacity, m_Count);
3911 
3912  if((newCapacity < m_Capacity) && !freeMemory)
3913  {
3914  newCapacity = m_Capacity;
3915  }
3916 
3917  if(newCapacity != m_Capacity)
3918  {
3919     T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3920  if(m_Count != 0)
3921  {
3922  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3923  }
3924  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3925  m_Capacity = newCapacity;
3926  m_pArray = newArray;
3927  }
3928  }
3929 
3930  void resize(size_t newCount, bool freeMemory = false)
3931  {
3932  size_t newCapacity = m_Capacity;
3933  if(newCount > m_Capacity)
3934  {
3935  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3936  }
3937  else if(freeMemory)
3938  {
3939  newCapacity = newCount;
3940  }
3941 
3942  if(newCapacity != m_Capacity)
3943  {
3944  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3945  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3946  if(elementsToCopy != 0)
3947  {
3948  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3949  }
3950  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3951  m_Capacity = newCapacity;
3952  m_pArray = newArray;
3953  }
3954 
3955  m_Count = newCount;
3956  }
3957 
3958  void clear(bool freeMemory = false)
3959  {
3960  resize(0, freeMemory);
3961  }
3962 
3963  void insert(size_t index, const T& src)
3964  {
3965  VMA_HEAVY_ASSERT(index <= m_Count);
3966  const size_t oldCount = size();
3967  resize(oldCount + 1);
3968  if(index < oldCount)
3969  {
3970  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3971  }
3972  m_pArray[index] = src;
3973  }
3974 
3975  void remove(size_t index)
3976  {
3977  VMA_HEAVY_ASSERT(index < m_Count);
3978  const size_t oldCount = size();
3979  if(index < oldCount - 1)
3980  {
3981  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3982  }
3983  resize(oldCount - 1);
3984  }
3985 
3986  void push_back(const T& src)
3987  {
3988  const size_t newIndex = size();
3989  resize(newIndex + 1);
3990  m_pArray[newIndex] = src;
3991  }
3992 
3993  void pop_back()
3994  {
3995  VMA_HEAVY_ASSERT(m_Count > 0);
3996  resize(size() - 1);
3997  }
3998 
3999  void push_front(const T& src)
4000  {
4001  insert(0, src);
4002  }
4003 
4004  void pop_front()
4005  {
4006  VMA_HEAVY_ASSERT(m_Count > 0);
4007  remove(0);
4008  }
4009 
4010  typedef T* iterator;
4011 
4012  iterator begin() { return m_pArray; }
4013  iterator end() { return m_pArray + m_Count; }
4014 
4015 private:
4016  AllocatorT m_Allocator;
4017  T* m_pArray;
4018  size_t m_Count;
4019  size_t m_Capacity;
4020 };
4021 
4022 template<typename T, typename allocatorT>
4023 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4024 {
4025  vec.insert(index, item);
4026 }
4027 
4028 template<typename T, typename allocatorT>
4029 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4030 {
4031  vec.remove(index);
4032 }
4033 
4034 #endif // #if VMA_USE_STL_VECTOR
4035 
4036 template<typename CmpLess, typename VectorT>
4037 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4038 {
4039  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4040  vector.data(),
4041  vector.data() + vector.size(),
4042  value,
4043  CmpLess()) - vector.data();
4044  VmaVectorInsert(vector, indexToInsert, value);
4045  return indexToInsert;
4046 }
4047 
4048 template<typename CmpLess, typename VectorT>
4049 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4050 {
4051  CmpLess comparator;
4052  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4053  vector.begin(),
4054  vector.end(),
4055  value,
4056  comparator);
4057  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4058  {
4059  size_t indexToRemove = it - vector.begin();
4060  VmaVectorRemove(vector, indexToRemove);
4061  return true;
4062  }
4063  return false;
4064 }
4065 
4066 template<typename CmpLess, typename IterT, typename KeyT>
4067 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4068 {
4069  CmpLess comparator;
4070  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4071  beg, end, value, comparator);
4072  if(it == end ||
4073  (!comparator(*it, value) && !comparator(value, *it)))
4074  {
4075  return it;
4076  }
4077  return end;
4078 }
4079 
4080 ////////////////////////////////////////////////////////////////////////////////
4081 // class VmaPoolAllocator
4082 
4083 /*
4084 Allocator for objects of type T using a list of arrays (pools) to speed up
4085 allocation. Number of elements that can be allocated is not bounded because
4086 allocator can create multiple blocks.
4087 */
4088 template<typename T>
4089 class VmaPoolAllocator
4090 {
4091  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4092 public:
4093  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
4094  ~VmaPoolAllocator();
4095  void Clear();
4096  T* Alloc();
4097  void Free(T* ptr);
4098 
4099 private:
4100  union Item
4101  {
4102  uint32_t NextFreeIndex;
4103  T Value;
4104  };
4105 
4106  struct ItemBlock
4107  {
4108  Item* pItems;
4109  uint32_t FirstFreeIndex;
4110  };
4111 
4112  const VkAllocationCallbacks* m_pAllocationCallbacks;
4113  size_t m_ItemsPerBlock;
4114  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4115 
4116  ItemBlock& CreateNewBlock();
4117 };
4118 
4119 template<typename T>
4120 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
4121  m_pAllocationCallbacks(pAllocationCallbacks),
4122  m_ItemsPerBlock(itemsPerBlock),
4123  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4124 {
4125  VMA_ASSERT(itemsPerBlock > 0);
4126 }
4127 
4128 template<typename T>
4129 VmaPoolAllocator<T>::~VmaPoolAllocator()
4130 {
4131  Clear();
4132 }
4133 
4134 template<typename T>
4135 void VmaPoolAllocator<T>::Clear()
4136 {
4137  for(size_t i = m_ItemBlocks.size(); i--; )
4138  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4139  m_ItemBlocks.clear();
4140 }
4141 
4142 template<typename T>
4143 T* VmaPoolAllocator<T>::Alloc()
4144 {
4145  for(size_t i = m_ItemBlocks.size(); i--; )
4146  {
4147  ItemBlock& block = m_ItemBlocks[i];
4148  // This block has some free items: Use first one.
4149  if(block.FirstFreeIndex != UINT32_MAX)
4150  {
4151  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4152  block.FirstFreeIndex = pItem->NextFreeIndex;
4153  return &pItem->Value;
4154  }
4155  }
4156 
4157  // No block has free item: Create new one and use it.
4158  ItemBlock& newBlock = CreateNewBlock();
4159  Item* const pItem = &newBlock.pItems[0];
4160  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4161  return &pItem->Value;
4162 }
4163 
4164 template<typename T>
4165 void VmaPoolAllocator<T>::Free(T* ptr)
4166 {
4167  // Search all memory blocks to find ptr.
4168  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
4169  {
4170  ItemBlock& block = m_ItemBlocks[i];
4171 
4172  // Casting to union.
4173  Item* pItemPtr;
4174  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4175 
4176  // Check if pItemPtr is in address range of this block.
4177  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
4178  {
4179  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4180  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4181  block.FirstFreeIndex = index;
4182  return;
4183  }
4184  }
4185  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4186 }
4187 
4188 template<typename T>
4189 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4190 {
4191  ItemBlock newBlock = {
4192  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4193 
4194  m_ItemBlocks.push_back(newBlock);
4195 
4196     // Set up the singly-linked list of all free items in this block.
4197  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4198  newBlock.pItems[i].NextFreeIndex = i + 1;
4199  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4200  return m_ItemBlocks.back();
4201 }
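/* Usage sketch (editorial): the pool hands out fixed-size items from
itemsPerBlock-sized blocks via an intrusive free list, so most Alloc/Free calls
never touch the system heap.

    VmaPoolAllocator<uint64_t> pool(pAllocationCallbacks, 128);
    uint64_t* item = pool.Alloc();
    *item = 123;
    pool.Free(item);
*/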
4202 
4203 ////////////////////////////////////////////////////////////////////////////////
4204 // class VmaRawList, VmaList
4205 
4206 #if VMA_USE_STL_LIST
4207 
4208 #define VmaList std::list
4209 
4210 #else // #if VMA_USE_STL_LIST
4211 
4212 template<typename T>
4213 struct VmaListItem
4214 {
4215  VmaListItem* pPrev;
4216  VmaListItem* pNext;
4217  T Value;
4218 };
4219 
4220 // Doubly linked list.
4221 template<typename T>
4222 class VmaRawList
4223 {
4224  VMA_CLASS_NO_COPY(VmaRawList)
4225 public:
4226  typedef VmaListItem<T> ItemType;
4227 
4228  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4229  ~VmaRawList();
4230  void Clear();
4231 
4232  size_t GetCount() const { return m_Count; }
4233  bool IsEmpty() const { return m_Count == 0; }
4234 
4235  ItemType* Front() { return m_pFront; }
4236  const ItemType* Front() const { return m_pFront; }
4237  ItemType* Back() { return m_pBack; }
4238  const ItemType* Back() const { return m_pBack; }
4239 
4240  ItemType* PushBack();
4241  ItemType* PushFront();
4242  ItemType* PushBack(const T& value);
4243  ItemType* PushFront(const T& value);
4244  void PopBack();
4245  void PopFront();
4246 
4247  // Item can be null - it means PushBack.
4248  ItemType* InsertBefore(ItemType* pItem);
4249  // Item can be null - it means PushFront.
4250  ItemType* InsertAfter(ItemType* pItem);
4251 
4252  ItemType* InsertBefore(ItemType* pItem, const T& value);
4253  ItemType* InsertAfter(ItemType* pItem, const T& value);
4254 
4255  void Remove(ItemType* pItem);
4256 
4257 private:
4258  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4259  VmaPoolAllocator<ItemType> m_ItemAllocator;
4260  ItemType* m_pFront;
4261  ItemType* m_pBack;
4262  size_t m_Count;
4263 };
4264 
4265 template<typename T>
4266 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4267  m_pAllocationCallbacks(pAllocationCallbacks),
4268  m_ItemAllocator(pAllocationCallbacks, 128),
4269  m_pFront(VMA_NULL),
4270  m_pBack(VMA_NULL),
4271  m_Count(0)
4272 {
4273 }
4274 
4275 template<typename T>
4276 VmaRawList<T>::~VmaRawList()
4277 {
4278     // Intentionally not calling Clear, because that would waste computation on
4279     // returning all items to m_ItemAllocator as free, just before it is destroyed.
4280 }
4281 
4282 template<typename T>
4283 void VmaRawList<T>::Clear()
4284 {
4285  if(IsEmpty() == false)
4286  {
4287  ItemType* pItem = m_pBack;
4288  while(pItem != VMA_NULL)
4289  {
4290  ItemType* const pPrevItem = pItem->pPrev;
4291  m_ItemAllocator.Free(pItem);
4292  pItem = pPrevItem;
4293  }
4294  m_pFront = VMA_NULL;
4295  m_pBack = VMA_NULL;
4296  m_Count = 0;
4297  }
4298 }
4299 
4300 template<typename T>
4301 VmaListItem<T>* VmaRawList<T>::PushBack()
4302 {
4303  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4304  pNewItem->pNext = VMA_NULL;
4305  if(IsEmpty())
4306  {
4307  pNewItem->pPrev = VMA_NULL;
4308  m_pFront = pNewItem;
4309  m_pBack = pNewItem;
4310  m_Count = 1;
4311  }
4312  else
4313  {
4314  pNewItem->pPrev = m_pBack;
4315  m_pBack->pNext = pNewItem;
4316  m_pBack = pNewItem;
4317  ++m_Count;
4318  }
4319  return pNewItem;
4320 }
4321 
4322 template<typename T>
4323 VmaListItem<T>* VmaRawList<T>::PushFront()
4324 {
4325  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4326  pNewItem->pPrev = VMA_NULL;
4327  if(IsEmpty())
4328  {
4329  pNewItem->pNext = VMA_NULL;
4330  m_pFront = pNewItem;
4331  m_pBack = pNewItem;
4332  m_Count = 1;
4333  }
4334  else
4335  {
4336  pNewItem->pNext = m_pFront;
4337  m_pFront->pPrev = pNewItem;
4338  m_pFront = pNewItem;
4339  ++m_Count;
4340  }
4341  return pNewItem;
4342 }
4343 
4344 template<typename T>
4345 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4346 {
4347  ItemType* const pNewItem = PushBack();
4348  pNewItem->Value = value;
4349  return pNewItem;
4350 }
4351 
4352 template<typename T>
4353 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4354 {
4355  ItemType* const pNewItem = PushFront();
4356  pNewItem->Value = value;
4357  return pNewItem;
4358 }
4359 
4360 template<typename T>
4361 void VmaRawList<T>::PopBack()
4362 {
4363  VMA_HEAVY_ASSERT(m_Count > 0);
4364  ItemType* const pBackItem = m_pBack;
4365  ItemType* const pPrevItem = pBackItem->pPrev;
4366  if(pPrevItem != VMA_NULL)
4367  {
4368  pPrevItem->pNext = VMA_NULL;
4369  }
4370  m_pBack = pPrevItem;
4371  m_ItemAllocator.Free(pBackItem);
4372  --m_Count;
4373 }
4374 
4375 template<typename T>
4376 void VmaRawList<T>::PopFront()
4377 {
4378  VMA_HEAVY_ASSERT(m_Count > 0);
4379  ItemType* const pFrontItem = m_pFront;
4380  ItemType* const pNextItem = pFrontItem->pNext;
4381  if(pNextItem != VMA_NULL)
4382  {
4383  pNextItem->pPrev = VMA_NULL;
4384  }
4385  m_pFront = pNextItem;
4386  m_ItemAllocator.Free(pFrontItem);
4387  --m_Count;
4388 }
4389 
4390 template<typename T>
4391 void VmaRawList<T>::Remove(ItemType* pItem)
4392 {
4393  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4394  VMA_HEAVY_ASSERT(m_Count > 0);
4395 
4396  if(pItem->pPrev != VMA_NULL)
4397  {
4398  pItem->pPrev->pNext = pItem->pNext;
4399  }
4400  else
4401  {
4402  VMA_HEAVY_ASSERT(m_pFront == pItem);
4403  m_pFront = pItem->pNext;
4404  }
4405 
4406  if(pItem->pNext != VMA_NULL)
4407  {
4408  pItem->pNext->pPrev = pItem->pPrev;
4409  }
4410  else
4411  {
4412  VMA_HEAVY_ASSERT(m_pBack == pItem);
4413  m_pBack = pItem->pPrev;
4414  }
4415 
4416  m_ItemAllocator.Free(pItem);
4417  --m_Count;
4418 }
4419 
4420 template<typename T>
4421 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4422 {
4423  if(pItem != VMA_NULL)
4424  {
4425  ItemType* const prevItem = pItem->pPrev;
4426  ItemType* const newItem = m_ItemAllocator.Alloc();
4427  newItem->pPrev = prevItem;
4428  newItem->pNext = pItem;
4429  pItem->pPrev = newItem;
4430  if(prevItem != VMA_NULL)
4431  {
4432  prevItem->pNext = newItem;
4433  }
4434  else
4435  {
4436  VMA_HEAVY_ASSERT(m_pFront == pItem);
4437  m_pFront = newItem;
4438  }
4439  ++m_Count;
4440  return newItem;
4441  }
4442  else
4443  return PushBack();
4444 }
4445 
4446 template<typename T>
4447 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4448 {
4449  if(pItem != VMA_NULL)
4450  {
4451  ItemType* const nextItem = pItem->pNext;
4452  ItemType* const newItem = m_ItemAllocator.Alloc();
4453  newItem->pNext = nextItem;
4454  newItem->pPrev = pItem;
4455  pItem->pNext = newItem;
4456  if(nextItem != VMA_NULL)
4457  {
4458  nextItem->pPrev = newItem;
4459  }
4460  else
4461  {
4462  VMA_HEAVY_ASSERT(m_pBack == pItem);
4463  m_pBack = newItem;
4464  }
4465  ++m_Count;
4466  return newItem;
4467  }
4468  else
4469  return PushFront();
4470 }
4471 
4472 template<typename T>
4473 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4474 {
4475  ItemType* const newItem = InsertBefore(pItem);
4476  newItem->Value = value;
4477  return newItem;
4478 }
4479 
4480 template<typename T>
4481 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4482 {
4483  ItemType* const newItem = InsertAfter(pItem);
4484  newItem->Value = value;
4485  return newItem;
4486 }
4487 
4488 template<typename T, typename AllocatorT>
4489 class VmaList
4490 {
4491  VMA_CLASS_NO_COPY(VmaList)
4492 public:
4493  class iterator
4494  {
4495  public:
4496  iterator() :
4497  m_pList(VMA_NULL),
4498  m_pItem(VMA_NULL)
4499  {
4500  }
4501 
4502  T& operator*() const
4503  {
4504  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4505  return m_pItem->Value;
4506  }
4507  T* operator->() const
4508  {
4509  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4510  return &m_pItem->Value;
4511  }
4512 
4513  iterator& operator++()
4514  {
4515  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4516  m_pItem = m_pItem->pNext;
4517  return *this;
4518  }
4519  iterator& operator--()
4520  {
4521  if(m_pItem != VMA_NULL)
4522  {
4523  m_pItem = m_pItem->pPrev;
4524  }
4525  else
4526  {
4527  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4528  m_pItem = m_pList->Back();
4529  }
4530  return *this;
4531  }
4532 
4533  iterator operator++(int)
4534  {
4535  iterator result = *this;
4536  ++*this;
4537  return result;
4538  }
4539  iterator operator--(int)
4540  {
4541  iterator result = *this;
4542  --*this;
4543  return result;
4544  }
4545 
4546  bool operator==(const iterator& rhs) const
4547  {
4548  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4549  return m_pItem == rhs.m_pItem;
4550  }
4551  bool operator!=(const iterator& rhs) const
4552  {
4553  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4554  return m_pItem != rhs.m_pItem;
4555  }
4556 
4557  private:
4558  VmaRawList<T>* m_pList;
4559  VmaListItem<T>* m_pItem;
4560 
4561  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4562  m_pList(pList),
4563  m_pItem(pItem)
4564  {
4565  }
4566 
4567  friend class VmaList<T, AllocatorT>;
4568  };
4569 
4570  class const_iterator
4571  {
4572  public:
4573  const_iterator() :
4574  m_pList(VMA_NULL),
4575  m_pItem(VMA_NULL)
4576  {
4577  }
4578 
4579  const_iterator(const iterator& src) :
4580  m_pList(src.m_pList),
4581  m_pItem(src.m_pItem)
4582  {
4583  }
4584 
4585  const T& operator*() const
4586  {
4587  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4588  return m_pItem->Value;
4589  }
4590  const T* operator->() const
4591  {
4592  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4593  return &m_pItem->Value;
4594  }
4595 
4596  const_iterator& operator++()
4597  {
4598  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4599  m_pItem = m_pItem->pNext;
4600  return *this;
4601  }
4602  const_iterator& operator--()
4603  {
4604  if(m_pItem != VMA_NULL)
4605  {
4606  m_pItem = m_pItem->pPrev;
4607  }
4608  else
4609  {
4610  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4611  m_pItem = m_pList->Back();
4612  }
4613  return *this;
4614  }
4615 
4616  const_iterator operator++(int)
4617  {
4618  const_iterator result = *this;
4619  ++*this;
4620  return result;
4621  }
4622  const_iterator operator--(int)
4623  {
4624  const_iterator result = *this;
4625  --*this;
4626  return result;
4627  }
4628 
4629  bool operator==(const const_iterator& rhs) const
4630  {
4631  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4632  return m_pItem == rhs.m_pItem;
4633  }
4634  bool operator!=(const const_iterator& rhs) const
4635  {
4636  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4637  return m_pItem != rhs.m_pItem;
4638  }
4639 
4640  private:
4641  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4642  m_pList(pList),
4643  m_pItem(pItem)
4644  {
4645  }
4646 
4647  const VmaRawList<T>* m_pList;
4648  const VmaListItem<T>* m_pItem;
4649 
4650  friend class VmaList<T, AllocatorT>;
4651  };
4652 
4653  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4654 
4655  bool empty() const { return m_RawList.IsEmpty(); }
4656  size_t size() const { return m_RawList.GetCount(); }
4657 
4658  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4659  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4660 
4661  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4662  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4663 
4664  void clear() { m_RawList.Clear(); }
4665  void push_back(const T& value) { m_RawList.PushBack(value); }
4666  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4667  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4668 
4669 private:
4670  VmaRawList<T> m_RawList;
4671 };
4672 
4673 #endif // #if VMA_USE_STL_LIST
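// Illustrative note (not part of the original header): VmaList above mimics a
// small subset of std::list. A hedged usage sketch, assuming a
// VmaStlAllocator<int> named `alloc` already exists:
#if 0
    VmaList< int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.push_back(2);
    // Iteration works like with std::list; --end() yields the last element.
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin();
        it != list.end();
        ++it)
    {
        const int value = *it; // Dereferencing asserts the item is not null.
        (void)value;
    }
#endif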
4674 
4676 // class VmaMap
4677 
4678 // Unused in this version.
4679 #if 0
4680 
4681 #if VMA_USE_STL_UNORDERED_MAP
4682 
4683 #define VmaPair std::pair
4684 
4685 #define VMA_MAP_TYPE(KeyT, ValueT) \
4686  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4687 
4688 #else // #if VMA_USE_STL_UNORDERED_MAP
4689 
4690 template<typename T1, typename T2>
4691 struct VmaPair
4692 {
4693  T1 first;
4694  T2 second;
4695 
4696  VmaPair() : first(), second() { }
4697  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4698 };
4699 
4700 /* Class compatible with a subset of the std::unordered_map interface.
4701 KeyT and ValueT must be POD because they will be stored in a VmaVector.
4702 */
4703 template<typename KeyT, typename ValueT>
4704 class VmaMap
4705 {
4706 public:
4707  typedef VmaPair<KeyT, ValueT> PairType;
4708  typedef PairType* iterator;
4709 
4710  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4711 
4712  iterator begin() { return m_Vector.begin(); }
4713  iterator end() { return m_Vector.end(); }
4714 
4715  void insert(const PairType& pair);
4716  iterator find(const KeyT& key);
4717  void erase(iterator it);
4718 
4719 private:
4720  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4721 };
4722 
4723 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4724 
4725 template<typename FirstT, typename SecondT>
4726 struct VmaPairFirstLess
4727 {
4728  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4729  {
4730  return lhs.first < rhs.first;
4731  }
4732  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4733  {
4734  return lhs.first < rhsFirst;
4735  }
4736 };
4737 
4738 template<typename KeyT, typename ValueT>
4739 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4740 {
4741  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4742  m_Vector.data(),
4743  m_Vector.data() + m_Vector.size(),
4744  pair,
4745  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4746  VmaVectorInsert(m_Vector, indexToInsert, pair);
4747 }
4748 
4749 template<typename KeyT, typename ValueT>
4750 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4751 {
4752  PairType* it = VmaBinaryFindFirstNotLess(
4753  m_Vector.data(),
4754  m_Vector.data() + m_Vector.size(),
4755  key,
4756  VmaPairFirstLess<KeyT, ValueT>());
4757  if((it != m_Vector.end()) && (it->first == key))
4758  {
4759  return it;
4760  }
4761  else
4762  {
4763  return m_Vector.end();
4764  }
4765 }
4766 
4767 template<typename KeyT, typename ValueT>
4768 void VmaMap<KeyT, ValueT>::erase(iterator it)
4769 {
4770  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4771 }
4772 
4773 #endif // #if VMA_USE_STL_UNORDERED_MAP
4774 
4775 #endif // #if 0
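// Illustrative sketch (not part of the original header): the VmaMap above keeps
// its pairs in a vector sorted by key, so insert/find reduce to one binary
// search. A minimal stand-alone equivalent using std containers; the name
// FlatMapExample is hypothetical:
#if 0
#include <vector>
#include <algorithm>

template<typename KeyT, typename ValueT>
class FlatMapExample
{
public:
    typedef std::pair<KeyT, ValueT> PairType;

    void insert(const PairType& pair)
    {
        // First element whose key is not less than the new key; inserting
        // before it keeps the vector sorted (same idea as VmaBinaryFindFirstNotLess).
        typename std::vector<PairType>::iterator it = std::lower_bound(
            m_Vec.begin(), m_Vec.end(), pair,
            [](const PairType& lhs, const PairType& rhs) { return lhs.first < rhs.first; });
        m_Vec.insert(it, pair);
    }

    ValueT* find(const KeyT& key)
    {
        typename std::vector<PairType>::iterator it = std::lower_bound(
            m_Vec.begin(), m_Vec.end(), key,
            [](const PairType& lhs, const KeyT& rhsKey) { return lhs.first < rhsKey; });
        return (it != m_Vec.end() && it->first == key) ? &it->second : nullptr;
    }

private:
    std::vector<PairType> m_Vec;
};
#endif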
4776 
4778 
4779 class VmaDeviceMemoryBlock;
4780 
4781 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4782 
4783 struct VmaAllocation_T
4784 {
4785  VMA_CLASS_NO_COPY(VmaAllocation_T)
4786 private:
4787  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4788 
4789  enum FLAGS
4790  {
4791  FLAG_USER_DATA_STRING = 0x01,
4792  };
4793 
4794 public:
4795  enum ALLOCATION_TYPE
4796  {
4797  ALLOCATION_TYPE_NONE,
4798  ALLOCATION_TYPE_BLOCK,
4799  ALLOCATION_TYPE_DEDICATED,
4800  };
4801 
4802  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4803  m_Alignment(1),
4804  m_Size(0),
4805  m_pUserData(VMA_NULL),
4806  m_LastUseFrameIndex(currentFrameIndex),
4807  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4808  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4809  m_MapCount(0),
4810  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4811  {
4812 #if VMA_STATS_STRING_ENABLED
4813  m_CreationFrameIndex = currentFrameIndex;
4814  m_BufferImageUsage = 0;
4815 #endif
4816  }
4817 
4818  ~VmaAllocation_T()
4819  {
4820  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4821 
4822  // Check that the owned string was freed.
4823  VMA_ASSERT(m_pUserData == VMA_NULL);
4824  }
4825 
4826  void InitBlockAllocation(
4827  VmaPool hPool,
4828  VmaDeviceMemoryBlock* block,
4829  VkDeviceSize offset,
4830  VkDeviceSize alignment,
4831  VkDeviceSize size,
4832  VmaSuballocationType suballocationType,
4833  bool mapped,
4834  bool canBecomeLost)
4835  {
4836  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4837  VMA_ASSERT(block != VMA_NULL);
4838  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4839  m_Alignment = alignment;
4840  m_Size = size;
4841  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4842  m_SuballocationType = (uint8_t)suballocationType;
4843  m_BlockAllocation.m_hPool = hPool;
4844  m_BlockAllocation.m_Block = block;
4845  m_BlockAllocation.m_Offset = offset;
4846  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4847  }
4848 
4849  void InitLost()
4850  {
4851  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4852  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4853  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4854  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4855  m_BlockAllocation.m_Block = VMA_NULL;
4856  m_BlockAllocation.m_Offset = 0;
4857  m_BlockAllocation.m_CanBecomeLost = true;
4858  }
4859 
4860  void ChangeBlockAllocation(
4861  VmaAllocator hAllocator,
4862  VmaDeviceMemoryBlock* block,
4863  VkDeviceSize offset);
4864 
4865  void ChangeSize(VkDeviceSize newSize);
4866  void ChangeOffset(VkDeviceSize newOffset);
4867 
4868  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4869  void InitDedicatedAllocation(
4870  uint32_t memoryTypeIndex,
4871  VkDeviceMemory hMemory,
4872  VmaSuballocationType suballocationType,
4873  void* pMappedData,
4874  VkDeviceSize size)
4875  {
4876  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4877  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4878  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4879  m_Alignment = 0;
4880  m_Size = size;
4881  m_SuballocationType = (uint8_t)suballocationType;
4882  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4883  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4884  m_DedicatedAllocation.m_hMemory = hMemory;
4885  m_DedicatedAllocation.m_pMappedData = pMappedData;
4886  }
4887 
4888  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4889  VkDeviceSize GetAlignment() const { return m_Alignment; }
4890  VkDeviceSize GetSize() const { return m_Size; }
4891  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4892  void* GetUserData() const { return m_pUserData; }
4893  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4894  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4895 
4896  VmaDeviceMemoryBlock* GetBlock() const
4897  {
4898  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4899  return m_BlockAllocation.m_Block;
4900  }
4901  VkDeviceSize GetOffset() const;
4902  VkDeviceMemory GetMemory() const;
4903  uint32_t GetMemoryTypeIndex() const;
4904  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4905  void* GetMappedData() const;
4906  bool CanBecomeLost() const;
4907  VmaPool GetPool() const;
4908 
4909  uint32_t GetLastUseFrameIndex() const
4910  {
4911  return m_LastUseFrameIndex.load();
4912  }
4913  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4914  {
4915  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4916  }
4917  /*
4918  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4919  makes the allocation lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4920  - Otherwise, returns false.
4921 
4922  Asserts if hAllocation is already lost - you should not call it then.
4923  Asserts if hAllocation was not created with CAN_BECOME_LOST_BIT.
4924  */
4925  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4926 
4927  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4928  {
4929  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4930  outInfo.blockCount = 1;
4931  outInfo.allocationCount = 1;
4932  outInfo.unusedRangeCount = 0;
4933  outInfo.usedBytes = m_Size;
4934  outInfo.unusedBytes = 0;
4935  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4936  outInfo.unusedRangeSizeMin = UINT64_MAX;
4937  outInfo.unusedRangeSizeMax = 0;
4938  }
4939 
4940  void BlockAllocMap();
4941  void BlockAllocUnmap();
4942  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4943  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4944 
4945 #if VMA_STATS_STRING_ENABLED
4946  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4947  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4948 
4949  void InitBufferImageUsage(uint32_t bufferImageUsage)
4950  {
4951  VMA_ASSERT(m_BufferImageUsage == 0);
4952  m_BufferImageUsage = bufferImageUsage;
4953  }
4954 
4955  void PrintParameters(class VmaJsonWriter& json) const;
4956 #endif
4957 
4958 private:
4959  VkDeviceSize m_Alignment;
4960  VkDeviceSize m_Size;
4961  void* m_pUserData;
4962  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4963  uint8_t m_Type; // ALLOCATION_TYPE
4964  uint8_t m_SuballocationType; // VmaSuballocationType
4965  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4966  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4967  uint8_t m_MapCount;
4968  uint8_t m_Flags; // enum FLAGS
4969 
4970  // Allocation out of VmaDeviceMemoryBlock.
4971  struct BlockAllocation
4972  {
4973  VmaPool m_hPool; // Null if the allocation belongs to general memory.
4974  VmaDeviceMemoryBlock* m_Block;
4975  VkDeviceSize m_Offset;
4976  bool m_CanBecomeLost;
4977  };
4978 
4979  // Allocation for an object that has its own private VkDeviceMemory.
4980  struct DedicatedAllocation
4981  {
4982  uint32_t m_MemoryTypeIndex;
4983  VkDeviceMemory m_hMemory;
4984  void* m_pMappedData; // Not null means memory is mapped.
4985  };
4986 
4987  union
4988  {
4989  // Allocation out of VmaDeviceMemoryBlock.
4990  BlockAllocation m_BlockAllocation;
4991  // Allocation for an object that has its own private VkDeviceMemory.
4992  DedicatedAllocation m_DedicatedAllocation;
4993  };
4994 
4995 #if VMA_STATS_STRING_ENABLED
4996  uint32_t m_CreationFrameIndex;
4997  uint32_t m_BufferImageUsage; // 0 if unknown.
4998 #endif
4999 
5000  void FreeUserDataString(VmaAllocator hAllocator);
5001 };
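// Illustrative sketch (not part of the original header): how the single
// m_MapCount byte above packs two pieces of state - bit 0x80 marks a
// persistently mapped allocation, while the low 7 bits count
// vmaMapMemory()/vmaUnmapMemory() calls. The name MapStateExample is
// hypothetical:
#if 0
struct MapStateExample
{
    static const uint8_t PERSISTENT = 0x80; // Same role as MAP_COUNT_FLAG_PERSISTENT_MAP.
    uint8_t mapCount;

    bool IsPersistentMap() const { return (mapCount & PERSISTENT) != 0; }
    uint8_t RefCount() const { return mapCount & ~PERSISTENT; }

    void Map()
    {
        VMA_ASSERT(RefCount() < 0x7F && "Map reference counter overflow.");
        ++mapCount; // Increments only the low 7 bits; the flag bit stays untouched.
    }
    void Unmap()
    {
        VMA_ASSERT(RefCount() > 0 && "Unmapping an allocation that is not mapped.");
        --mapCount;
    }
};
#endif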
5002 
5003 /*
5004 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
5005 returned as an allocated memory block, or free.
5006 */
5007 struct VmaSuballocation
5008 {
5009  VkDeviceSize offset;
5010  VkDeviceSize size;
5011  VmaAllocation hAllocation;
5012  VmaSuballocationType type;
5013 };
5014 
5015 // Comparator for offsets.
5016 struct VmaSuballocationOffsetLess
5017 {
5018  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5019  {
5020  return lhs.offset < rhs.offset;
5021  }
5022 };
5023 struct VmaSuballocationOffsetGreater
5024 {
5025  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5026  {
5027  return lhs.offset > rhs.offset;
5028  }
5029 };
5030 
5031 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5032 
5033 // Cost of making one additional allocation lost, expressed in equivalent bytes.
5034 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5035 
5036 /*
5037 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5038 
5039 If canMakeOtherLost was false:
5040 - item points to a FREE suballocation.
5041 - itemsToMakeLostCount is 0.
5042 
5043 If canMakeOtherLost was true:
5044 - item points to first of sequence of suballocations, which are either FREE,
5045  or point to VmaAllocations that can become lost.
5046 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5047  the requested allocation to succeed.
5048 */
5049 struct VmaAllocationRequest
5050 {
5051  VkDeviceSize offset;
5052  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5053  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5054  VmaSuballocationList::iterator item;
5055  size_t itemsToMakeLostCount;
5056  void* customData;
5057 
5058  VkDeviceSize CalcCost() const
5059  {
5060  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5061  }
5062 };
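// Worked example (not part of the original header): CalcCost() above expresses
// the price of an allocation plan in "equivalent bytes". E.g. a plan that
// overlaps 4096 bytes of live items and would make 2 allocations lost costs
// 4096 + 2 * VMA_LOST_ALLOCATION_COST = 4096 + 2 * 1048576 = 2101248,
// so plans that sacrifice existing allocations compare poorly against plans
// that only consume free space.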
5063 
5064 /*
5065 Data structure used for bookkeeping of allocations and unused ranges of memory
5066 in a single VkDeviceMemory block.
5067 */
5068 class VmaBlockMetadata
5069 {
5070 public:
5071  VmaBlockMetadata(VmaAllocator hAllocator);
5072  virtual ~VmaBlockMetadata() { }
5073  virtual void Init(VkDeviceSize size) { m_Size = size; }
5074 
5075  // Validates all data structures inside this object. If not valid, returns false.
5076  virtual bool Validate() const = 0;
5077  VkDeviceSize GetSize() const { return m_Size; }
5078  virtual size_t GetAllocationCount() const = 0;
5079  virtual VkDeviceSize GetSumFreeSize() const = 0;
5080  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5081  // Returns true if this block is empty - contains only a single free suballocation.
5082  virtual bool IsEmpty() const = 0;
5083 
5084  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5085  // Shouldn't modify blockCount.
5086  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5087 
5088 #if VMA_STATS_STRING_ENABLED
5089  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5090 #endif
5091 
5092  // Tries to find a place for a suballocation with the given parameters inside this block.
5093  // On success, fills pAllocationRequest and returns true.
5094  // On failure, returns false.
5095  virtual bool CreateAllocationRequest(
5096  uint32_t currentFrameIndex,
5097  uint32_t frameInUseCount,
5098  VkDeviceSize bufferImageGranularity,
5099  VkDeviceSize allocSize,
5100  VkDeviceSize allocAlignment,
5101  bool upperAddress,
5102  VmaSuballocationType allocType,
5103  bool canMakeOtherLost,
5104  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5105  uint32_t strategy,
5106  VmaAllocationRequest* pAllocationRequest) = 0;
5107 
5108  virtual bool MakeRequestedAllocationsLost(
5109  uint32_t currentFrameIndex,
5110  uint32_t frameInUseCount,
5111  VmaAllocationRequest* pAllocationRequest) = 0;
5112 
5113  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5114 
5115  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5116 
5117  // Makes actual allocation based on request. Request must already be checked and valid.
5118  virtual void Alloc(
5119  const VmaAllocationRequest& request,
5120  VmaSuballocationType type,
5121  VkDeviceSize allocSize,
5122  bool upperAddress,
5123  VmaAllocation hAllocation) = 0;
5124 
5125  // Frees suballocation assigned to given memory region.
5126  virtual void Free(const VmaAllocation allocation) = 0;
5127  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5128 
5129  // Tries to resize (grow or shrink) the space for the given allocation, in place.
5130  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5131 
5132 protected:
5133  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5134 
5135 #if VMA_STATS_STRING_ENABLED
5136  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5137  VkDeviceSize unusedBytes,
5138  size_t allocationCount,
5139  size_t unusedRangeCount) const;
5140  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5141  VkDeviceSize offset,
5142  VmaAllocation hAllocation) const;
5143  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5144  VkDeviceSize offset,
5145  VkDeviceSize size) const;
5146  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5147 #endif
5148 
5149 private:
5150  VkDeviceSize m_Size;
5151  const VkAllocationCallbacks* m_pAllocationCallbacks;
5152 };
5153 
5154 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5155  VMA_ASSERT(0 && "Validation failed: " #cond); \
5156  return false; \
5157  } } while(false)
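// Illustrative sketch (not part of the original header): VMA_VALIDATE lets a
// Validate() implementation chain invariant checks that assert and return
// false on the first failure. A hypothetical example:
#if 0
static bool ValidateRangeExample(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize blockSize)
{
    VMA_VALIDATE(size > 0);
    VMA_VALIDATE(offset < blockSize);
    VMA_VALIDATE(offset + size <= blockSize);
    return true; // All invariants hold.
}
#endif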
5158 
5159 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5160 {
5161  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5162 public:
5163  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5164  virtual ~VmaBlockMetadata_Generic();
5165  virtual void Init(VkDeviceSize size);
5166 
5167  virtual bool Validate() const;
5168  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5169  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5170  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5171  virtual bool IsEmpty() const;
5172 
5173  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5174  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5175 
5176 #if VMA_STATS_STRING_ENABLED
5177  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5178 #endif
5179 
5180  virtual bool CreateAllocationRequest(
5181  uint32_t currentFrameIndex,
5182  uint32_t frameInUseCount,
5183  VkDeviceSize bufferImageGranularity,
5184  VkDeviceSize allocSize,
5185  VkDeviceSize allocAlignment,
5186  bool upperAddress,
5187  VmaSuballocationType allocType,
5188  bool canMakeOtherLost,
5189  uint32_t strategy,
5190  VmaAllocationRequest* pAllocationRequest);
5191 
5192  virtual bool MakeRequestedAllocationsLost(
5193  uint32_t currentFrameIndex,
5194  uint32_t frameInUseCount,
5195  VmaAllocationRequest* pAllocationRequest);
5196 
5197  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5198 
5199  virtual VkResult CheckCorruption(const void* pBlockData);
5200 
5201  virtual void Alloc(
5202  const VmaAllocationRequest& request,
5203  VmaSuballocationType type,
5204  VkDeviceSize allocSize,
5205  bool upperAddress,
5206  VmaAllocation hAllocation);
5207 
5208  virtual void Free(const VmaAllocation allocation);
5209  virtual void FreeAtOffset(VkDeviceSize offset);
5210 
5211  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5212 
5214  // For defragmentation
5215 
5216  bool IsBufferImageGranularityConflictPossible(
5217  VkDeviceSize bufferImageGranularity,
5218  VmaSuballocationType& inOutPrevSuballocType) const;
5219 
5220 private:
5221  friend class VmaDefragmentationAlgorithm_Generic;
5222  friend class VmaDefragmentationAlgorithm_Fast;
5223 
5224  uint32_t m_FreeCount;
5225  VkDeviceSize m_SumFreeSize;
5226  VmaSuballocationList m_Suballocations;
5227  // Suballocations that are free and have size greater than certain threshold.
5228  // Sorted by size, ascending.
5229  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5230 
5231  bool ValidateFreeSuballocationList() const;
5232 
5233  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
5234  // If yes, fills pOffset and returns true. If no, returns false.
5235  bool CheckAllocation(
5236  uint32_t currentFrameIndex,
5237  uint32_t frameInUseCount,
5238  VkDeviceSize bufferImageGranularity,
5239  VkDeviceSize allocSize,
5240  VkDeviceSize allocAlignment,
5241  VmaSuballocationType allocType,
5242  VmaSuballocationList::const_iterator suballocItem,
5243  bool canMakeOtherLost,
5244  VkDeviceSize* pOffset,
5245  size_t* itemsToMakeLostCount,
5246  VkDeviceSize* pSumFreeSize,
5247  VkDeviceSize* pSumItemSize) const;
5248  // Given a free suballocation, merges it with the following one, which must also be free.
5249  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5250  // Releases given suballocation, making it free.
5251  // Merges it with adjacent free suballocations if applicable.
5252  // Returns iterator to new free suballocation at this place.
5253  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5254  // Given a free suballocation, inserts it into the sorted list
5255  // m_FreeSuballocationsBySize if it's suitable.
5256  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5257  // Given a free suballocation, removes it from the sorted list
5258  // m_FreeSuballocationsBySize if it's suitable.
5259  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5260 };
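// Illustrative sketch (not part of the original header): because
// m_FreeSuballocationsBySize above is kept sorted by size ascending, a
// best-fit candidate is one binary search away - the first free range whose
// size is not less than the requested size. A stand-alone sketch with
// hypothetical names:
#if 0
#include <vector>
#include <algorithm>

inline size_t FindBestFitExample(
    const std::vector<VkDeviceSize>& freeSizesAscending,
    VkDeviceSize allocSize)
{
    // lower_bound returns the first element >= allocSize.
    std::vector<VkDeviceSize>::const_iterator it = std::lower_bound(
        freeSizesAscending.begin(), freeSizesAscending.end(), allocSize);
    if(it == freeSizesAscending.end())
    {
        return SIZE_MAX; // No single free range is large enough.
    }
    return (size_t)(it - freeSizesAscending.begin());
}
#endif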
5261 
5262 /*
5263 Allocations and their references in internal data structure look like this:
5264 
5265 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5266 
5267  0 +-------+
5268  | |
5269  | |
5270  | |
5271  +-------+
5272  | Alloc | 1st[m_1stNullItemsBeginCount]
5273  +-------+
5274  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5275  +-------+
5276  | ... |
5277  +-------+
5278  | Alloc | 1st[1st.size() - 1]
5279  +-------+
5280  | |
5281  | |
5282  | |
5283 GetSize() +-------+
5284 
5285 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5286 
5287  0 +-------+
5288  | Alloc | 2nd[0]
5289  +-------+
5290  | Alloc | 2nd[1]
5291  +-------+
5292  | ... |
5293  +-------+
5294  | Alloc | 2nd[2nd.size() - 1]
5295  +-------+
5296  | |
5297  | |
5298  | |
5299  +-------+
5300  | Alloc | 1st[m_1stNullItemsBeginCount]
5301  +-------+
5302  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5303  +-------+
5304  | ... |
5305  +-------+
5306  | Alloc | 1st[1st.size() - 1]
5307  +-------+
5308  | |
5309 GetSize() +-------+
5310 
5311 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5312 
5313  0 +-------+
5314  | |
5315  | |
5316  | |
5317  +-------+
5318  | Alloc | 1st[m_1stNullItemsBeginCount]
5319  +-------+
5320  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5321  +-------+
5322  | ... |
5323  +-------+
5324  | Alloc | 1st[1st.size() - 1]
5325  +-------+
5326  | |
5327  | |
5328  | |
5329  +-------+
5330  | Alloc | 2nd[2nd.size() - 1]
5331  +-------+
5332  | ... |
5333  +-------+
5334  | Alloc | 2nd[1]
5335  +-------+
5336  | Alloc | 2nd[0]
5337 GetSize() +-------+
5338 
5339 */
5340 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5341 {
5342  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5343 public:
5344  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5345  virtual ~VmaBlockMetadata_Linear();
5346  virtual void Init(VkDeviceSize size);
5347 
5348  virtual bool Validate() const;
5349  virtual size_t GetAllocationCount() const;
5350  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5351  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5352  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5353 
5354  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5355  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5356 
5357 #if VMA_STATS_STRING_ENABLED
5358  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5359 #endif
5360 
5361  virtual bool CreateAllocationRequest(
5362  uint32_t currentFrameIndex,
5363  uint32_t frameInUseCount,
5364  VkDeviceSize bufferImageGranularity,
5365  VkDeviceSize allocSize,
5366  VkDeviceSize allocAlignment,
5367  bool upperAddress,
5368  VmaSuballocationType allocType,
5369  bool canMakeOtherLost,
5370  uint32_t strategy,
5371  VmaAllocationRequest* pAllocationRequest);
5372 
5373  virtual bool MakeRequestedAllocationsLost(
5374  uint32_t currentFrameIndex,
5375  uint32_t frameInUseCount,
5376  VmaAllocationRequest* pAllocationRequest);
5377 
5378  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5379 
5380  virtual VkResult CheckCorruption(const void* pBlockData);
5381 
5382  virtual void Alloc(
5383  const VmaAllocationRequest& request,
5384  VmaSuballocationType type,
5385  VkDeviceSize allocSize,
5386  bool upperAddress,
5387  VmaAllocation hAllocation);
5388 
5389  virtual void Free(const VmaAllocation allocation);
5390  virtual void FreeAtOffset(VkDeviceSize offset);
5391 
5392 private:
5393  /*
5394  There are two suballocation vectors, used in ping-pong way.
5395  The one with index m_1stVectorIndex is called 1st.
5396  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5397  2nd can be non-empty only when 1st is not empty.
5398  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5399  */
5400  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5401 
5402  enum SECOND_VECTOR_MODE
5403  {
5404  SECOND_VECTOR_EMPTY,
5405  /*
5406  Suballocations in the 2nd vector are created later than the ones in the 1st,
5407  but they all have smaller offsets.
5408  */
5409  SECOND_VECTOR_RING_BUFFER,
5410  /*
5411  Suballocations in 2nd vector are upper side of double stack.
5412  They all have offsets higher than those in 1st vector.
5413  Top of this stack means smaller offsets, but higher indices in this vector.
5414  */
5415  SECOND_VECTOR_DOUBLE_STACK,
5416  };
5417 
5418  VkDeviceSize m_SumFreeSize;
5419  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5420  uint32_t m_1stVectorIndex;
5421  SECOND_VECTOR_MODE m_2ndVectorMode;
5422 
5423  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5424  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5425  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5426  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5427 
5428  // Number of items in 1st vector with hAllocation = null at the beginning.
5429  size_t m_1stNullItemsBeginCount;
5430  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5431  size_t m_1stNullItemsMiddleCount;
5432  // Number of items in 2nd vector with hAllocation = null.
5433  size_t m_2ndNullItemsCount;
5434 
5435  bool ShouldCompact1st() const;
5436  void CleanupAfterFree();
5437 };
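// Illustrative note (not part of the original header): the linear metadata
// above swaps the roles of its two suballocation vectors by flipping one bit.
// The "1st" vector is selected by m_1stVectorIndex and the "2nd" by
// (m_1stVectorIndex ^ 1), so after compaction the roles can be exchanged with:
//
//     m_1stVectorIndex ^= 1; // 2nd becomes 1st and vice versa - no copying.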
5438 
5439 /*
5440 - GetSize() is the original size of the allocated memory block.
5441 - m_UsableSize is this size aligned down to a power of two.
5442  All allocations and calculations happen relative to m_UsableSize.
5443 - GetUnusableSize() is the difference between them.
5444  It is reported as a separate, unused range, not available for allocations.
5445 
5446 Node at level 0 has size = m_UsableSize.
5447 Each subsequent level contains nodes half the size of those on the previous level.
5448 m_LevelCount is the maximum number of levels to use in the current object.
5449 */
5450 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5451 {
5452  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5453 public:
5454  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5455  virtual ~VmaBlockMetadata_Buddy();
5456  virtual void Init(VkDeviceSize size);
5457 
5458  virtual bool Validate() const;
5459  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5460  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5461  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5462  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5463 
5464  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5465  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5466 
5467 #if VMA_STATS_STRING_ENABLED
5468  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5469 #endif
5470 
5471  virtual bool CreateAllocationRequest(
5472  uint32_t currentFrameIndex,
5473  uint32_t frameInUseCount,
5474  VkDeviceSize bufferImageGranularity,
5475  VkDeviceSize allocSize,
5476  VkDeviceSize allocAlignment,
5477  bool upperAddress,
5478  VmaSuballocationType allocType,
5479  bool canMakeOtherLost,
5480  uint32_t strategy,
5481  VmaAllocationRequest* pAllocationRequest);
5482 
5483  virtual bool MakeRequestedAllocationsLost(
5484  uint32_t currentFrameIndex,
5485  uint32_t frameInUseCount,
5486  VmaAllocationRequest* pAllocationRequest);
5487 
5488  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5489 
5490  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5491 
5492  virtual void Alloc(
5493  const VmaAllocationRequest& request,
5494  VmaSuballocationType type,
5495  VkDeviceSize allocSize,
5496  bool upperAddress,
5497  VmaAllocation hAllocation);
5498 
5499  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5500  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5501 
5502 private:
5503  static const VkDeviceSize MIN_NODE_SIZE = 32;
5504  static const size_t MAX_LEVELS = 30;
5505 
5506  struct ValidationContext
5507  {
5508  size_t calculatedAllocationCount;
5509  size_t calculatedFreeCount;
5510  VkDeviceSize calculatedSumFreeSize;
5511 
5512  ValidationContext() :
5513  calculatedAllocationCount(0),
5514  calculatedFreeCount(0),
5515  calculatedSumFreeSize(0) { }
5516  };
5517 
5518  struct Node
5519  {
5520  VkDeviceSize offset;
5521  enum TYPE
5522  {
5523  TYPE_FREE,
5524  TYPE_ALLOCATION,
5525  TYPE_SPLIT,
5526  TYPE_COUNT
5527  } type;
5528  Node* parent;
5529  Node* buddy;
5530 
5531  union
5532  {
5533  struct
5534  {
5535  Node* prev;
5536  Node* next;
5537  } free;
5538  struct
5539  {
5540  VmaAllocation alloc;
5541  } allocation;
5542  struct
5543  {
5544  Node* leftChild;
5545  } split;
5546  };
5547  };
5548 
5549  // Size of the memory block aligned down to a power of two.
5550  VkDeviceSize m_UsableSize;
5551  uint32_t m_LevelCount;
5552 
5553  Node* m_Root;
5554  struct {
5555  Node* front;
5556  Node* back;
5557  } m_FreeList[MAX_LEVELS];
5558  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5559  size_t m_AllocationCount;
5560  // Number of nodes in the tree with type == TYPE_FREE.
5561  size_t m_FreeCount;
5562  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5563  VkDeviceSize m_SumFreeSize;
5564 
5565  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5566  void DeleteNode(Node* node);
5567  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5568  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5569  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5570  // alloc is passed just for validation; it can be null.
5571  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5572  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5573  // Adds node to the front of FreeList at given level.
5574  // node->type must be FREE.
5575  // node->free.prev, next can be undefined.
5576  void AddToFreeListFront(uint32_t level, Node* node);
5577  // Removes node from FreeList at given level.
5578  // node->type must be FREE.
5579  // node->free.prev, next stay untouched.
5580  void RemoveFromFreeList(uint32_t level, Node* node);
5581 
5582 #if VMA_STATS_STRING_ENABLED
5583  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5584 #endif
5585 };
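// Illustrative sketch (not part of the original header): the buddy metadata's
// level arithmetic. LevelToNodeSize(level) is m_UsableSize >> level, and the
// level chosen for a request is the deepest one whose node still fits it.
// A stand-alone sketch under those assumptions; the names are hypothetical:
#if 0
inline uint32_t AllocSizeToLevelExample(
    VkDeviceSize allocSize,
    VkDeviceSize usableSize, // Block size aligned down to a power of two.
    uint32_t levelCount)
{
    uint32_t level = 0;
    VkDeviceSize nodeSize = usableSize;
    // Descend while the child node (half the size) still fits the request.
    while(level + 1 < levelCount && (nodeSize >> 1) >= allocSize)
    {
        nodeSize >>= 1;
        ++level;
    }
    return level; // e.g. usableSize = 1024, allocSize = 100 -> level 3 (node size 128).
}
#endif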
5586 
5587 /*
5588 Represents a single block of device memory (`VkDeviceMemory`) with all the
5589 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5590 
5591 Thread-safety: This class must be externally synchronized.
5592 */
5593 class VmaDeviceMemoryBlock
5594 {
5595  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5596 public:
5597  VmaBlockMetadata* m_pMetadata;
5598 
5599  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5600 
5601  ~VmaDeviceMemoryBlock()
5602  {
5603  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5604  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5605  }
5606 
5607  // Always call after construction.
5608  void Init(
5609  VmaAllocator hAllocator,
5610  uint32_t newMemoryTypeIndex,
5611  VkDeviceMemory newMemory,
5612  VkDeviceSize newSize,
5613  uint32_t id,
5614  uint32_t algorithm);
5615  // Always call before destruction.
5616  void Destroy(VmaAllocator allocator);
5617 
5618  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5619  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5620  uint32_t GetId() const { return m_Id; }
5621  void* GetMappedData() const { return m_pMappedData; }
5622 
5623  // Validates all data structures inside this object. If not valid, returns false.
5624  bool Validate() const;
5625 
5626  VkResult CheckCorruption(VmaAllocator hAllocator);
5627 
5628  // ppData can be null.
5629  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5630  void Unmap(VmaAllocator hAllocator, uint32_t count);
5631 
5632  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5633  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5634 
5635  VkResult BindBufferMemory(
5636  const VmaAllocator hAllocator,
5637  const VmaAllocation hAllocation,
5638  VkBuffer hBuffer);
5639  VkResult BindImageMemory(
5640  const VmaAllocator hAllocator,
5641  const VmaAllocation hAllocation,
5642  VkImage hImage);
5643 
5644 private:
5645  uint32_t m_MemoryTypeIndex;
5646  uint32_t m_Id;
5647  VkDeviceMemory m_hMemory;
5648 
5649  /*
5650  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5651  Also protects m_MapCount, m_pMappedData.
5652  Allocations, deallocations, and any changes to m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
5653  */
5654  VMA_MUTEX m_Mutex;
5655  uint32_t m_MapCount;
5656  void* m_pMappedData;
5657 };
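// Illustrative sketch (not part of the original header): Map()/Unmap() above
// are reference counted - only the transition 0 -> 1 actually maps the memory,
// and only 1 -> 0 unmaps it; in between, the cached m_pMappedData is returned.
// A simplified, hedged sketch that calls vkMapMemory directly (the real code
// dispatches through VmaVulkanFunctions, takes m_Mutex, and honors `count`):
#if 0
static VkResult MapRefCountedExample(
    VkDevice device, VkDeviceMemory memory,
    uint32_t& mapCount, void*& pMappedData, void** ppData)
{
    if(mapCount != 0)
    {
        ++mapCount; // Already mapped - just hand out the cached pointer.
        if(ppData != VMA_NULL) { *ppData = pMappedData; }
        return VK_SUCCESS;
    }
    const VkResult res = vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
    if(res == VK_SUCCESS)
    {
        mapCount = 1;
        if(ppData != VMA_NULL) { *ppData = pMappedData; }
    }
    return res;
}
#endif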
5658 
5659 struct VmaPointerLess
5660 {
5661  bool operator()(const void* lhs, const void* rhs) const
5662  {
5663  return lhs < rhs;
5664  }
5665 };
5666 
5667 struct VmaDefragmentationMove
5668 {
5669  size_t srcBlockIndex;
5670  size_t dstBlockIndex;
5671  VkDeviceSize srcOffset;
5672  VkDeviceSize dstOffset;
5673  VkDeviceSize size;
5674 };
5675 
5676 class VmaDefragmentationAlgorithm;
5677 
5678 /*
5679 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5680 Vulkan memory type.
5681 
5682 Synchronized internally with a mutex.
5683 */
5684 struct VmaBlockVector
5685 {
5686  VMA_CLASS_NO_COPY(VmaBlockVector)
5687 public:
5688  VmaBlockVector(
5689  VmaAllocator hAllocator,
5690  uint32_t memoryTypeIndex,
5691  VkDeviceSize preferredBlockSize,
5692  size_t minBlockCount,
5693  size_t maxBlockCount,
5694  VkDeviceSize bufferImageGranularity,
5695  uint32_t frameInUseCount,
5696  bool isCustomPool,
5697  bool explicitBlockSize,
5698  uint32_t algorithm);
5699  ~VmaBlockVector();
5700 
5701  VkResult CreateMinBlocks();
5702 
5703  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5704  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5705  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5706  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5707  uint32_t GetAlgorithm() const { return m_Algorithm; }
5708 
5709  void GetPoolStats(VmaPoolStats* pStats);
5710 
5711  bool IsEmpty() const { return m_Blocks.empty(); }
5712  bool IsCorruptionDetectionEnabled() const;
5713 
5714  VkResult Allocate(
5715  VmaPool hCurrentPool,
5716  uint32_t currentFrameIndex,
5717  VkDeviceSize size,
5718  VkDeviceSize alignment,
5719  const VmaAllocationCreateInfo& createInfo,
5720  VmaSuballocationType suballocType,
5721  VmaAllocation* pAllocation);
5722 
5723  void Free(
5724  VmaAllocation hAllocation);
5725 
5726  // Adds statistics of this BlockVector to pStats.
5727  void AddStats(VmaStats* pStats);
5728 
5729 #if VMA_STATS_STRING_ENABLED
5730  void PrintDetailedMap(class VmaJsonWriter& json);
5731 #endif
5732 
5733  void MakePoolAllocationsLost(
5734  uint32_t currentFrameIndex,
5735  size_t* pLostAllocationCount);
5736  VkResult CheckCorruption();
5737 
5738  // Saves results in pCtx->res.
5739  void Defragment(
5740  class VmaBlockVectorDefragmentationContext* pCtx,
5741  VmaDefragmentationStats* pStats,
5742  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5743  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5744  VkCommandBuffer commandBuffer);
5745  void DefragmentationEnd(
5746  class VmaBlockVectorDefragmentationContext* pCtx,
5747  VmaDefragmentationStats* pStats);
5748 
5750  // To be used only while the m_Mutex is locked. Used during defragmentation.
5751 
5752  size_t GetBlockCount() const { return m_Blocks.size(); }
5753  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5754  size_t CalcAllocationCount() const;
5755  bool IsBufferImageGranularityConflictPossible() const;
5756 
5757 private:
5758  friend class VmaDefragmentationAlgorithm_Generic;
5759 
5760  const VmaAllocator m_hAllocator;
5761  const uint32_t m_MemoryTypeIndex;
5762  const VkDeviceSize m_PreferredBlockSize;
5763  const size_t m_MinBlockCount;
5764  const size_t m_MaxBlockCount;
5765  const VkDeviceSize m_BufferImageGranularity;
5766  const uint32_t m_FrameInUseCount;
5767  const bool m_IsCustomPool;
5768  const bool m_ExplicitBlockSize;
5769  const uint32_t m_Algorithm;
5770  /* There can be at most one block that is completely empty - a
5771  hysteresis to avoid the pessimistic case of alternately creating and
5772  destroying a VkDeviceMemory. */
5773  bool m_HasEmptyBlock;
5774  VMA_RW_MUTEX m_Mutex;
5775  // Incrementally sorted by sumFreeSize, ascending.
5776  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5777  uint32_t m_NextBlockId;
5778 
5779  VkDeviceSize CalcMaxBlockSize() const;
5780 
5781  // Finds and removes given block from vector.
5782  void Remove(VmaDeviceMemoryBlock* pBlock);
5783 
5784  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5785  // after this call.
5786  void IncrementallySortBlocks();
5787 
5788  // To be used only without CAN_MAKE_OTHER_LOST flag.
5789  VkResult AllocateFromBlock(
5790  VmaDeviceMemoryBlock* pBlock,
5791  VmaPool hCurrentPool,
5792  uint32_t currentFrameIndex,
5793  VkDeviceSize size,
5794  VkDeviceSize alignment,
5795  VmaAllocationCreateFlags allocFlags,
5796  void* pUserData,
5797  VmaSuballocationType suballocType,
5798  uint32_t strategy,
5799  VmaAllocation* pAllocation);
5800 
5801  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5802 
5803  // Saves result to pCtx->res.
5804  void ApplyDefragmentationMovesCpu(
5805  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5806  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
5807  // Saves result to pCtx->res.
5808  void ApplyDefragmentationMovesGpu(
5809  class VmaBlockVectorDefragmentationContext* pDefragCtx,
5810  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5811  VkCommandBuffer commandBuffer);
5812 
5813  /*
5814  Used during defragmentation. pDefragmentationStats is optional - it's an
5815  in/out parameter, updated with new data.
5816  */
5817  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
5818 };
5819 
5820 struct VmaPool_T
5821 {
5822  VMA_CLASS_NO_COPY(VmaPool_T)
5823 public:
5824  VmaBlockVector m_BlockVector;
5825 
5826  VmaPool_T(
5827  VmaAllocator hAllocator,
5828  const VmaPoolCreateInfo& createInfo,
5829  VkDeviceSize preferredBlockSize);
5830  ~VmaPool_T();
5831 
5832  uint32_t GetId() const { return m_Id; }
5833  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5834 
5835 #if VMA_STATS_STRING_ENABLED
5836  //void PrintDetailedMap(class VmaStringBuilder& sb);
5837 #endif
5838 
5839 private:
5840  uint32_t m_Id;
5841 };
5842 
5843 /*
5844 Performs defragmentation:
5845 
5846 - Updates `pBlockVector->m_pMetadata`.
5847 - Updates allocations by calling ChangeBlockAllocation().
5848 - Does not move actual data, only returns requested moves as `moves`.
5849 */
5850 class VmaDefragmentationAlgorithm
5851 {
5852  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
5853 public:
5854  VmaDefragmentationAlgorithm(
5855  VmaAllocator hAllocator,
5856  VmaBlockVector* pBlockVector,
5857  uint32_t currentFrameIndex) :
5858  m_hAllocator(hAllocator),
5859  m_pBlockVector(pBlockVector),
5860  m_CurrentFrameIndex(currentFrameIndex)
5861  {
5862  }
5863  virtual ~VmaDefragmentationAlgorithm()
5864  {
5865  }
5866 
5867  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
5868  virtual void AddAll() = 0;
5869 
5870  virtual VkResult Defragment(
5871  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5872  VkDeviceSize maxBytesToMove,
5873  uint32_t maxAllocationsToMove) = 0;
5874 
5875  virtual VkDeviceSize GetBytesMoved() const = 0;
5876  virtual uint32_t GetAllocationsMoved() const = 0;
5877 
5878 protected:
5879  VmaAllocator const m_hAllocator;
5880  VmaBlockVector* const m_pBlockVector;
5881  const uint32_t m_CurrentFrameIndex;
5882 
5883  struct AllocationInfo
5884  {
5885  VmaAllocation m_hAllocation;
5886  VkBool32* m_pChanged;
5887 
5888  AllocationInfo() :
5889  m_hAllocation(VK_NULL_HANDLE),
5890  m_pChanged(VMA_NULL)
5891  {
5892  }
5893  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
5894  m_hAllocation(hAlloc),
5895  m_pChanged(pChanged)
5896  {
5897  }
5898  };
5899 };
5900 
5901 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
5902 {
5903  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
5904 public:
5905  VmaDefragmentationAlgorithm_Generic(
5906  VmaAllocator hAllocator,
5907  VmaBlockVector* pBlockVector,
5908  uint32_t currentFrameIndex,
5909  bool overlappingMoveSupported);
5910  virtual ~VmaDefragmentationAlgorithm_Generic();
5911 
5912  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5913  virtual void AddAll() { m_AllAllocations = true; }
5914 
5915  virtual VkResult Defragment(
5916  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
5917  VkDeviceSize maxBytesToMove,
5918  uint32_t maxAllocationsToMove);
5919 
5920  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5921  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5922 
5923 private:
5924  uint32_t m_AllocationCount;
5925  bool m_AllAllocations;
5926 
5927  VkDeviceSize m_BytesMoved;
5928  uint32_t m_AllocationsMoved;
5929 
5930  struct AllocationInfoSizeGreater
5931  {
5932  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5933  {
5934  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5935  }
5936  };
5937 
5938  struct AllocationInfoOffsetGreater
5939  {
5940  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5941  {
5942  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
5943  }
5944  };
5945 
5946  struct BlockInfo
5947  {
5948  size_t m_OriginalBlockIndex;
5949  VmaDeviceMemoryBlock* m_pBlock;
5950  bool m_HasNonMovableAllocations;
5951  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5952 
5953  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5954  m_OriginalBlockIndex(SIZE_MAX),
5955  m_pBlock(VMA_NULL),
5956  m_HasNonMovableAllocations(true),
5957  m_Allocations(pAllocationCallbacks)
5958  {
5959  }
5960 
5961  void CalcHasNonMovableAllocations()
5962  {
5963  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5964  const size_t defragmentAllocCount = m_Allocations.size();
5965  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5966  }
5967 
5968  void SortAllocationsBySizeDescending()
5969  {
5970  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5971  }
5972 
5973  void SortAllocationsByOffsetDescending()
5974  {
5975  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
5976  }
5977  };
5978 
5979  struct BlockPointerLess
5980  {
5981  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5982  {
5983  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5984  }
5985  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5986  {
5987  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5988  }
5989  };
5990 
5991  // 1. Blocks with some non-movable allocations go first.
5992  // 2. Blocks with smaller sumFreeSize go first.
5993  struct BlockInfoCompareMoveDestination
5994  {
5995  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5996  {
5997  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5998  {
5999  return true;
6000  }
6001  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6002  {
6003  return false;
6004  }
6005  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6006  {
6007  return true;
6008  }
6009  return false;
6010  }
6011  };
6012 
6013  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6014  BlockInfoVector m_Blocks;
6015 
6016  VkResult DefragmentRound(
6017  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6018  VkDeviceSize maxBytesToMove,
6019  uint32_t maxAllocationsToMove);
6020 
6021  size_t CalcBlocksWithNonMovableCount() const;
6022 
6023  static bool MoveMakesSense(
6024  size_t dstBlockIndex, VkDeviceSize dstOffset,
6025  size_t srcBlockIndex, VkDeviceSize srcOffset);
6026 };
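// Worked note (not part of the original header): BlockInfoCompareMoveDestination
// above orders candidate destination blocks so that (1) blocks pinned by
// non-movable allocations come first and (2) among equals, fuller blocks
// (smaller sumFreeSize) come first. Filling the fullest viable block first
// maximizes the chance of emptying other blocks entirely, which is the point
// of defragmentation.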
6027 
6028 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6029 {
6030  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6031 public:
6032  VmaDefragmentationAlgorithm_Fast(
6033  VmaAllocator hAllocator,
6034  VmaBlockVector* pBlockVector,
6035  uint32_t currentFrameIndex,
6036  bool overlappingMoveSupported);
6037  virtual ~VmaDefragmentationAlgorithm_Fast();
6038 
6039  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6040  virtual void AddAll() { m_AllAllocations = true; }
6041 
6042  virtual VkResult Defragment(
6043  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6044  VkDeviceSize maxBytesToMove,
6045  uint32_t maxAllocationsToMove);
6046 
6047  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6048  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6049 
6050 private:
6051  struct BlockInfo
6052  {
6053  size_t origBlockIndex;
6054  };
6055 
6056  class FreeSpaceDatabase
6057  {
6058  public:
6059  FreeSpaceDatabase()
6060  {
6061  FreeSpace s = {};
6062  s.blockInfoIndex = SIZE_MAX;
6063  for(size_t i = 0; i < MAX_COUNT; ++i)
6064  {
6065  m_FreeSpaces[i] = s;
6066  }
6067  }
6068 
6069  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6070  {
6071  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6072  {
6073  return;
6074  }
6075 
6076  // Find the first invalid structure or, failing that, the smallest one.
6077  size_t bestIndex = SIZE_MAX;
6078  for(size_t i = 0; i < MAX_COUNT; ++i)
6079  {
6080  // Empty structure.
6081  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6082  {
6083  bestIndex = i;
6084  break;
6085  }
6086  if(m_FreeSpaces[i].size < size &&
6087  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6088  {
6089  bestIndex = i;
6090  }
6091  }
6092 
6093  if(bestIndex != SIZE_MAX)
6094  {
6095  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6096  m_FreeSpaces[bestIndex].offset = offset;
6097  m_FreeSpaces[bestIndex].size = size;
6098  }
6099  }
6100 
6101  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6102  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6103  {
6104  size_t bestIndex = SIZE_MAX;
6105  VkDeviceSize bestFreeSpaceAfter = 0;
6106  for(size_t i = 0; i < MAX_COUNT; ++i)
6107  {
6108  // Structure is valid.
6109  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6110  {
6111  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6112  // Allocation fits into this structure.
6113  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6114  {
6115  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6116  (dstOffset + size);
6117  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6118  {
6119  bestIndex = i;
6120  bestFreeSpaceAfter = freeSpaceAfter;
6121  }
6122  }
6123  }
6124  }
6125 
6126  if(bestIndex != SIZE_MAX)
6127  {
6128  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6129  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6130 
6131  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6132  {
6133  // Leave this structure for remaining empty space.
6134  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6135  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6136  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6137  }
6138  else
6139  {
6140  // This structure becomes invalid.
6141  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6142  }
6143 
6144  return true;
6145  }
6146 
6147  return false;
6148  }
6149 
6150  private:
6151  static const size_t MAX_COUNT = 4;
6152 
6153  struct FreeSpace
6154  {
6155  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6156  VkDeviceSize offset;
6157  VkDeviceSize size;
6158  } m_FreeSpaces[MAX_COUNT];
6159  };
6160 
6161  const bool m_OverlappingMoveSupported;
6162 
6163  uint32_t m_AllocationCount;
6164  bool m_AllAllocations;
6165 
6166  VkDeviceSize m_BytesMoved;
6167  uint32_t m_AllocationsMoved;
6168 
6169  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6170 
6171  void PreprocessMetadata();
6172  void PostprocessMetadata();
6173  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6174 };
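// Illustrative sketch (not part of the original header): FreeSpaceDatabase::Fetch
// above aligns the candidate offset up to the required alignment before testing
// the fit. For the power-of-two alignments Vulkan guarantees, one common
// implementation of such an align-up (the real VmaAlignUp may differ) is:
#if 0
inline VkDeviceSize AlignUpExample(VkDeviceSize val, VkDeviceSize alignment)
{
    // Assumes alignment is a power of two.
    return (val + alignment - 1) & ~(alignment - 1);
}
// AlignUpExample(13, 8) == 16; AlignUpExample(16, 8) == 16.
#endif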
6175 
6176 struct VmaBlockDefragmentationContext
6177 {
6178 private:
6179  VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
6180 public:
6181  enum BLOCK_FLAG
6182  {
6183  BLOCK_FLAG_USED = 0x00000001,
6184  };
6185  uint32_t flags;
6186  VkBuffer hBuffer;
6187 
6188  VmaBlockDefragmentationContext() :
6189  flags(0),
6190  hBuffer(VK_NULL_HANDLE)
6191  {
6192  }
6193 };
6194 
6195 class VmaBlockVectorDefragmentationContext
6196 {
6197  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6198 public:
6199  VkResult res;
6200  bool mutexLocked;
6201  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6202 
6203  VmaBlockVectorDefragmentationContext(
6204  VmaAllocator hAllocator,
6205  VmaPool hCustomPool, // Optional.
6206  VmaBlockVector* pBlockVector,
6207  uint32_t currFrameIndex,
6208  uint32_t flags);
6209  ~VmaBlockVectorDefragmentationContext();
6210 
6211  VmaPool GetCustomPool() const { return m_hCustomPool; }
6212  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6213  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6214 
6215  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6216  void AddAll() { m_AllAllocations = true; }
6217 
6218  void Begin(bool overlappingMoveSupported);
6219 
6220 private:
6221  const VmaAllocator m_hAllocator;
6222  // Null if not from custom pool.
6223  const VmaPool m_hCustomPool;
6224  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6225  VmaBlockVector* const m_pBlockVector;
6226  const uint32_t m_CurrFrameIndex;
6227  const uint32_t m_AlgorithmFlags;
6228  // Owner of this object.
6229  VmaDefragmentationAlgorithm* m_pAlgorithm;
6230 
6231  struct AllocInfo
6232  {
6233  VmaAllocation hAlloc;
6234  VkBool32* pChanged;
6235  };
6236  // Used between constructor and Begin.
6237  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6238  bool m_AllAllocations;
6239 };
6240 
6241 struct VmaDefragmentationContext_T
6242 {
6243 private:
6244  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6245 public:
6246  VmaDefragmentationContext_T(
6247  VmaAllocator hAllocator,
6248  uint32_t currFrameIndex,
6249  uint32_t flags,
6250  VmaDefragmentationStats* pStats);
6251  ~VmaDefragmentationContext_T();
6252 
6253  void AddPools(uint32_t poolCount, VmaPool* pPools);
6254  void AddAllocations(
6255  uint32_t allocationCount,
6256  VmaAllocation* pAllocations,
6257  VkBool32* pAllocationsChanged);
6258 
6259  /*
6260  Returns:
6261  - `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
6262  - `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
6263  - Negative value if an error occurred and the object can be destroyed immediately.
6264  */
6265  VkResult Defragment(
6266  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6267  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6268  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6269 
6270 private:
6271  const VmaAllocator m_hAllocator;
6272  const uint32_t m_CurrFrameIndex;
6273  const uint32_t m_Flags;
6274  VmaDefragmentationStats* const m_pStats;
6275  // Owner of these objects.
6276  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6277  // Owner of these objects.
6278  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6279 };
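// Usage note (not part of the original header): a hedged sketch of how a
// caller reacts to the Defragment() contract documented above:
//
//     VkResult res = ctx->Defragment(...);
//     if(res == VK_SUCCESS)        { /* finished - context may be destroyed */ }
//     else if(res == VK_NOT_READY) { /* keep context alive until vmaDefragmentationEnd() */ }
//     else                         { /* error - context may be destroyed */ }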
6280 
6281 #if VMA_RECORDING_ENABLED
6282 
6283 class VmaRecorder
6284 {
6285 public:
6286  VmaRecorder();
6287  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6288  void WriteConfiguration(
6289  const VkPhysicalDeviceProperties& devProps,
6290  const VkPhysicalDeviceMemoryProperties& memProps,
6291  bool dedicatedAllocationExtensionEnabled);
6292  ~VmaRecorder();
6293 
6294  void RecordCreateAllocator(uint32_t frameIndex);
6295  void RecordDestroyAllocator(uint32_t frameIndex);
6296  void RecordCreatePool(uint32_t frameIndex,
6297  const VmaPoolCreateInfo& createInfo,
6298  VmaPool pool);
6299  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6300  void RecordAllocateMemory(uint32_t frameIndex,
6301  const VkMemoryRequirements& vkMemReq,
6302  const VmaAllocationCreateInfo& createInfo,
6303  VmaAllocation allocation);
6304  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6305  const VkMemoryRequirements& vkMemReq,
6306  bool requiresDedicatedAllocation,
6307  bool prefersDedicatedAllocation,
6308  const VmaAllocationCreateInfo& createInfo,
6309  VmaAllocation allocation);
6310  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6311  const VkMemoryRequirements& vkMemReq,
6312  bool requiresDedicatedAllocation,
6313  bool prefersDedicatedAllocation,
6314  const VmaAllocationCreateInfo& createInfo,
6315  VmaAllocation allocation);
6316  void RecordFreeMemory(uint32_t frameIndex,
6317  VmaAllocation allocation);
6318  void RecordResizeAllocation(
6319  uint32_t frameIndex,
6320  VmaAllocation allocation,
6321  VkDeviceSize newSize);
6322  void RecordSetAllocationUserData(uint32_t frameIndex,
6323  VmaAllocation allocation,
6324  const void* pUserData);
6325  void RecordCreateLostAllocation(uint32_t frameIndex,
6326  VmaAllocation allocation);
6327  void RecordMapMemory(uint32_t frameIndex,
6328  VmaAllocation allocation);
6329  void RecordUnmapMemory(uint32_t frameIndex,
6330  VmaAllocation allocation);
6331  void RecordFlushAllocation(uint32_t frameIndex,
6332  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6333  void RecordInvalidateAllocation(uint32_t frameIndex,
6334  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6335  void RecordCreateBuffer(uint32_t frameIndex,
6336  const VkBufferCreateInfo& bufCreateInfo,
6337  const VmaAllocationCreateInfo& allocCreateInfo,
6338  VmaAllocation allocation);
6339  void RecordCreateImage(uint32_t frameIndex,
6340  const VkImageCreateInfo& imageCreateInfo,
6341  const VmaAllocationCreateInfo& allocCreateInfo,
6342  VmaAllocation allocation);
6343  void RecordDestroyBuffer(uint32_t frameIndex,
6344  VmaAllocation allocation);
6345  void RecordDestroyImage(uint32_t frameIndex,
6346  VmaAllocation allocation);
6347  void RecordTouchAllocation(uint32_t frameIndex,
6348  VmaAllocation allocation);
6349  void RecordGetAllocationInfo(uint32_t frameIndex,
6350  VmaAllocation allocation);
6351  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6352  VmaPool pool);
6353 
6354 private:
6355  struct CallParams
6356  {
6357  uint32_t threadId;
6358  double time;
6359  };
6360 
6361  class UserDataString
6362  {
6363  public:
6364  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6365  const char* GetString() const { return m_Str; }
6366 
6367  private:
6368  char m_PtrStr[17];
6369  const char* m_Str;
6370  };
6371 
6372  bool m_UseMutex;
6373  VmaRecordFlags m_Flags;
6374  FILE* m_File;
6375  VMA_MUTEX m_FileMutex;
6376  int64_t m_Freq;
6377  int64_t m_StartCounter;
6378 
6379  void GetBasicParams(CallParams& outParams);
6380  void Flush();
6381 };
6382 
6383 #endif // #if VMA_RECORDING_ENABLED
6384 
6385 // Main allocator object.
6386 struct VmaAllocator_T
6387 {
6388  VMA_CLASS_NO_COPY(VmaAllocator_T)
6389 public:
6390  bool m_UseMutex;
6391  bool m_UseKhrDedicatedAllocation;
6392  VkDevice m_hDevice;
6393  bool m_AllocationCallbacksSpecified;
6394  VkAllocationCallbacks m_AllocationCallbacks;
6395  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6396 
6397  // Number of bytes still free under the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
6398  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6399  VMA_MUTEX m_HeapSizeLimitMutex;
6400 
6401  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6402  VkPhysicalDeviceMemoryProperties m_MemProps;
6403 
6404  // Default pools.
6405  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6406 
6407  // Each vector is sorted by memory (handle value).
6408  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6409  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6410  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6411 
6412  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6413  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6414  ~VmaAllocator_T();
6415 
6416  const VkAllocationCallbacks* GetAllocationCallbacks() const
6417  {
6418  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6419  }
6420  const VmaVulkanFunctions& GetVulkanFunctions() const
6421  {
6422  return m_VulkanFunctions;
6423  }
6424 
6425  VkDeviceSize GetBufferImageGranularity() const
6426  {
6427  return VMA_MAX(
6428  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6429  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6430  }
6431 
6432  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6433  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6434 
6435  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6436  {
6437  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6438  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6439  }
6440  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6441  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6442  {
6443  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6444  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6445  }
6446  // Minimum alignment for all allocations in specific memory type.
6447  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6448  {
6449  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6450  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6451  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6452  }
6453 
6454  bool IsIntegratedGpu() const
6455  {
6456  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6457  }
6458 
6459 #if VMA_RECORDING_ENABLED
6460  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6461 #endif
6462 
6463  void GetBufferMemoryRequirements(
6464  VkBuffer hBuffer,
6465  VkMemoryRequirements& memReq,
6466  bool& requiresDedicatedAllocation,
6467  bool& prefersDedicatedAllocation) const;
6468  void GetImageMemoryRequirements(
6469  VkImage hImage,
6470  VkMemoryRequirements& memReq,
6471  bool& requiresDedicatedAllocation,
6472  bool& prefersDedicatedAllocation) const;
6473 
6474  // Main allocation function.
6475  VkResult AllocateMemory(
6476  const VkMemoryRequirements& vkMemReq,
6477  bool requiresDedicatedAllocation,
6478  bool prefersDedicatedAllocation,
6479  VkBuffer dedicatedBuffer,
6480  VkImage dedicatedImage,
6481  const VmaAllocationCreateInfo& createInfo,
6482  VmaSuballocationType suballocType,
6483  VmaAllocation* pAllocation);
6484 
6485  // Main deallocation function.
6486  void FreeMemory(const VmaAllocation allocation);
6487 
6488  VkResult ResizeAllocation(
6489  const VmaAllocation alloc,
6490  VkDeviceSize newSize);
6491 
6492  void CalculateStats(VmaStats* pStats);
6493 
6494 #if VMA_STATS_STRING_ENABLED
6495  void PrintDetailedMap(class VmaJsonWriter& json);
6496 #endif
6497 
6498  VkResult DefragmentationBegin(
6499  const VmaDefragmentationInfo2& info,
6500  VmaDefragmentationStats* pStats,
6501  VmaDefragmentationContext* pContext);
6502  VkResult DefragmentationEnd(
6503  VmaDefragmentationContext context);
6504 
6505  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6506  bool TouchAllocation(VmaAllocation hAllocation);
6507 
6508  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6509  void DestroyPool(VmaPool pool);
6510  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6511 
6512  void SetCurrentFrameIndex(uint32_t frameIndex);
6513  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6514 
6515  void MakePoolAllocationsLost(
6516  VmaPool hPool,
6517  size_t* pLostAllocationCount);
6518  VkResult CheckPoolCorruption(VmaPool hPool);
6519  VkResult CheckCorruption(uint32_t memoryTypeBits);
6520 
6521  void CreateLostAllocation(VmaAllocation* pAllocation);
6522 
6523  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6524  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6525 
6526  VkResult Map(VmaAllocation hAllocation, void** ppData);
6527  void Unmap(VmaAllocation hAllocation);
6528 
6529  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6530  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6531 
6532  void FlushOrInvalidateAllocation(
6533  VmaAllocation hAllocation,
6534  VkDeviceSize offset, VkDeviceSize size,
6535  VMA_CACHE_OPERATION op);
6536 
6537  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6538 
6539 private:
6540  VkDeviceSize m_PreferredLargeHeapBlockSize;
6541 
6542  VkPhysicalDevice m_PhysicalDevice;
6543  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6544 
6545  VMA_RW_MUTEX m_PoolsMutex;
6546  // Protected by m_PoolsMutex. Sorted by pointer value.
6547  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6548  uint32_t m_NextPoolId;
6549 
6550  VmaVulkanFunctions m_VulkanFunctions;
6551 
6552 #if VMA_RECORDING_ENABLED
6553  VmaRecorder* m_pRecorder;
6554 #endif
6555 
6556  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6557 
6558  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6559 
6560  VkResult AllocateMemoryOfType(
6561  VkDeviceSize size,
6562  VkDeviceSize alignment,
6563  bool dedicatedAllocation,
6564  VkBuffer dedicatedBuffer,
6565  VkImage dedicatedImage,
6566  const VmaAllocationCreateInfo& createInfo,
6567  uint32_t memTypeIndex,
6568  VmaSuballocationType suballocType,
6569  VmaAllocation* pAllocation);
6570 
6571  // Allocates and registers a new VkDeviceMemory dedicated to a single allocation.
6572  VkResult AllocateDedicatedMemory(
6573  VkDeviceSize size,
6574  VmaSuballocationType suballocType,
6575  uint32_t memTypeIndex,
6576  bool map,
6577  bool isUserDataString,
6578  void* pUserData,
6579  VkBuffer dedicatedBuffer,
6580  VkImage dedicatedImage,
6581  VmaAllocation* pAllocation);
6582 
6583  // Frees an allocation made as dedicated memory and unregisters it from m_pDedicatedAllocations.
6584  void FreeDedicatedMemory(VmaAllocation allocation);
6585 };
6586 
6587 ////////////////////////////////////////////////////////////////////////////////
6588 // Memory allocation #2 after VmaAllocator_T definition
6589 
6590 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6591 {
6592  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6593 }
6594 
6595 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6596 {
6597  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6598 }
6599 
6600 template<typename T>
6601 static T* VmaAllocate(VmaAllocator hAllocator)
6602 {
6603  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6604 }
6605 
6606 template<typename T>
6607 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6608 {
6609  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6610 }
6611 
6612 template<typename T>
6613 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6614 {
6615  if(ptr != VMA_NULL)
6616  {
6617  ptr->~T();
6618  VmaFree(hAllocator, ptr);
6619  }
6620 }
6621 
6622 template<typename T>
6623 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6624 {
6625  if(ptr != VMA_NULL)
6626  {
6627  for(size_t i = count; i--; )
6628  ptr[i].~T();
6629  VmaFree(hAllocator, ptr);
6630  }
6631 }
6632 
6633 ////////////////////////////////////////////////////////////////////////////////
6634 // VmaStringBuilder
6635 
6636 #if VMA_STATS_STRING_ENABLED
6637 
6638 class VmaStringBuilder
6639 {
6640 public:
6641  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6642  size_t GetLength() const { return m_Data.size(); }
6643  const char* GetData() const { return m_Data.data(); }
6644 
6645  void Add(char ch) { m_Data.push_back(ch); }
6646  void Add(const char* pStr);
6647  void AddNewLine() { Add('\n'); }
6648  void AddNumber(uint32_t num);
6649  void AddNumber(uint64_t num);
6650  void AddPointer(const void* ptr);
6651 
6652 private:
6653  VmaVector< char, VmaStlAllocator<char> > m_Data;
6654 };
6655 
6656 void VmaStringBuilder::Add(const char* pStr)
6657 {
6658  const size_t strLen = strlen(pStr);
6659  if(strLen > 0)
6660  {
6661  const size_t oldCount = m_Data.size();
6662  m_Data.resize(oldCount + strLen);
6663  memcpy(m_Data.data() + oldCount, pStr, strLen);
6664  }
6665 }
6666 
6667 void VmaStringBuilder::AddNumber(uint32_t num)
6668 {
6669  char buf[11]; // Max 10 decimal digits of uint32_t + terminating null.
6670  VmaUint32ToStr(buf, sizeof(buf), num);
6671  Add(buf);
6672 }
6673 
6674 void VmaStringBuilder::AddNumber(uint64_t num)
6675 {
6676  char buf[21]; // Max 20 decimal digits of uint64_t + terminating null.
6677  VmaUint64ToStr(buf, sizeof(buf), num);
6678  Add(buf);
6679 }
6680 
6681 void VmaStringBuilder::AddPointer(const void* ptr)
6682 {
6683  char buf[21]; // Large enough for a 64-bit pointer printed in hexadecimal + terminating null.
6684  VmaPtrToStr(buf, sizeof(buf), ptr);
6685  Add(buf);
6686 }
6687 
6688 #endif // #if VMA_STATS_STRING_ENABLED
6689 
6690 ////////////////////////////////////////////////////////////////////////////////
6691 // VmaJsonWriter
6692 
6693 #if VMA_STATS_STRING_ENABLED
6694 
6695 class VmaJsonWriter
6696 {
6697  VMA_CLASS_NO_COPY(VmaJsonWriter)
6698 public:
6699  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6700  ~VmaJsonWriter();
6701 
6702  void BeginObject(bool singleLine = false);
6703  void EndObject();
6704 
6705  void BeginArray(bool singleLine = false);
6706  void EndArray();
6707 
6708  void WriteString(const char* pStr);
6709  void BeginString(const char* pStr = VMA_NULL);
6710  void ContinueString(const char* pStr);
6711  void ContinueString(uint32_t n);
6712  void ContinueString(uint64_t n);
6713  void ContinueString_Pointer(const void* ptr);
6714  void EndString(const char* pStr = VMA_NULL);
6715 
6716  void WriteNumber(uint32_t n);
6717  void WriteNumber(uint64_t n);
6718  void WriteBool(bool b);
6719  void WriteNull();
6720 
6721 private:
6722  static const char* const INDENT;
6723 
6724  enum COLLECTION_TYPE
6725  {
6726  COLLECTION_TYPE_OBJECT,
6727  COLLECTION_TYPE_ARRAY,
6728  };
6729  struct StackItem
6730  {
6731  COLLECTION_TYPE type;
6732  uint32_t valueCount;
6733  bool singleLineMode;
6734  };
6735 
6736  VmaStringBuilder& m_SB;
6737  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6738  bool m_InsideString;
6739 
6740  void BeginValue(bool isString);
6741  void WriteIndent(bool oneLess = false);
6742 };
6743 
6744 const char* const VmaJsonWriter::INDENT = "  ";
6745 
6746 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6747  m_SB(sb),
6748  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6749  m_InsideString(false)
6750 {
6751 }
6752 
6753 VmaJsonWriter::~VmaJsonWriter()
6754 {
6755  VMA_ASSERT(!m_InsideString);
6756  VMA_ASSERT(m_Stack.empty());
6757 }
6758 
6759 void VmaJsonWriter::BeginObject(bool singleLine)
6760 {
6761  VMA_ASSERT(!m_InsideString);
6762 
6763  BeginValue(false);
6764  m_SB.Add('{');
6765 
6766  StackItem item;
6767  item.type = COLLECTION_TYPE_OBJECT;
6768  item.valueCount = 0;
6769  item.singleLineMode = singleLine;
6770  m_Stack.push_back(item);
6771 }
6772 
6773 void VmaJsonWriter::EndObject()
6774 {
6775  VMA_ASSERT(!m_InsideString);
6776 
6777  WriteIndent(true);
6778  m_SB.Add('}');
6779 
6780  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6781  m_Stack.pop_back();
6782 }
6783 
6784 void VmaJsonWriter::BeginArray(bool singleLine)
6785 {
6786  VMA_ASSERT(!m_InsideString);
6787 
6788  BeginValue(false);
6789  m_SB.Add('[');
6790 
6791  StackItem item;
6792  item.type = COLLECTION_TYPE_ARRAY;
6793  item.valueCount = 0;
6794  item.singleLineMode = singleLine;
6795  m_Stack.push_back(item);
6796 }
6797 
6798 void VmaJsonWriter::EndArray()
6799 {
6800  VMA_ASSERT(!m_InsideString);
6801 
6802  WriteIndent(true);
6803  m_SB.Add(']');
6804 
6805  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6806  m_Stack.pop_back();
6807 }
6808 
6809 void VmaJsonWriter::WriteString(const char* pStr)
6810 {
6811  BeginString(pStr);
6812  EndString();
6813 }
6814 
6815 void VmaJsonWriter::BeginString(const char* pStr)
6816 {
6817  VMA_ASSERT(!m_InsideString);
6818 
6819  BeginValue(true);
6820  m_SB.Add('"');
6821  m_InsideString = true;
6822  if(pStr != VMA_NULL && pStr[0] != '\0')
6823  {
6824  ContinueString(pStr);
6825  }
6826 }
6827 
6828 void VmaJsonWriter::ContinueString(const char* pStr)
6829 {
6830  VMA_ASSERT(m_InsideString);
6831 
6832  const size_t strLen = strlen(pStr);
6833  for(size_t i = 0; i < strLen; ++i)
6834  {
6835  char ch = pStr[i];
6836  if(ch == '\\')
6837  {
6838  m_SB.Add("\\\\");
6839  }
6840  else if(ch == '"')
6841  {
6842  m_SB.Add("\\\"");
6843  }
6844  else if(ch >= 32)
6845  {
6846  m_SB.Add(ch);
6847  }
6848  else switch(ch)
6849  {
6850  case '\b':
6851  m_SB.Add("\\b");
6852  break;
6853  case '\f':
6854  m_SB.Add("\\f");
6855  break;
6856  case '\n':
6857  m_SB.Add("\\n");
6858  break;
6859  case '\r':
6860  m_SB.Add("\\r");
6861  break;
6862  case '\t':
6863  m_SB.Add("\\t");
6864  break;
6865  default:
6866  VMA_ASSERT(0 && "Character not currently supported.");
6867  break;
6868  }
6869  }
6870 }
6871 
6872 void VmaJsonWriter::ContinueString(uint32_t n)
6873 {
6874  VMA_ASSERT(m_InsideString);
6875  m_SB.AddNumber(n);
6876 }
6877 
6878 void VmaJsonWriter::ContinueString(uint64_t n)
6879 {
6880  VMA_ASSERT(m_InsideString);
6881  m_SB.AddNumber(n);
6882 }
6883 
6884 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6885 {
6886  VMA_ASSERT(m_InsideString);
6887  m_SB.AddPointer(ptr);
6888 }
6889 
6890 void VmaJsonWriter::EndString(const char* pStr)
6891 {
6892  VMA_ASSERT(m_InsideString);
6893  if(pStr != VMA_NULL && pStr[0] != '\0')
6894  {
6895  ContinueString(pStr);
6896  }
6897  m_SB.Add('"');
6898  m_InsideString = false;
6899 }
6900 
6901 void VmaJsonWriter::WriteNumber(uint32_t n)
6902 {
6903  VMA_ASSERT(!m_InsideString);
6904  BeginValue(false);
6905  m_SB.AddNumber(n);
6906 }
6907 
6908 void VmaJsonWriter::WriteNumber(uint64_t n)
6909 {
6910  VMA_ASSERT(!m_InsideString);
6911  BeginValue(false);
6912  m_SB.AddNumber(n);
6913 }
6914 
6915 void VmaJsonWriter::WriteBool(bool b)
6916 {
6917  VMA_ASSERT(!m_InsideString);
6918  BeginValue(false);
6919  m_SB.Add(b ? "true" : "false");
6920 }
6921 
6922 void VmaJsonWriter::WriteNull()
6923 {
6924  VMA_ASSERT(!m_InsideString);
6925  BeginValue(false);
6926  m_SB.Add("null");
6927 }
6928 
6929 void VmaJsonWriter::BeginValue(bool isString)
6930 {
6931  if(!m_Stack.empty())
6932  {
6933  StackItem& currItem = m_Stack.back();
6934  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6935  currItem.valueCount % 2 == 0)
6936  {
6937  VMA_ASSERT(isString);
6938  }
6939 
6940  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6941  currItem.valueCount % 2 != 0)
6942  {
6943  m_SB.Add(": ");
6944  }
6945  else if(currItem.valueCount > 0)
6946  {
6947  m_SB.Add(", ");
6948  WriteIndent();
6949  }
6950  else
6951  {
6952  WriteIndent();
6953  }
6954  ++currItem.valueCount;
6955  }
6956 }
6957 
6958 void VmaJsonWriter::WriteIndent(bool oneLess)
6959 {
6960  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6961  {
6962  m_SB.AddNewLine();
6963 
6964  size_t count = m_Stack.size();
6965  if(count > 0 && oneLess)
6966  {
6967  --count;
6968  }
6969  for(size_t i = 0; i < count; ++i)
6970  {
6971  m_SB.Add(INDENT);
6972  }
6973  }
6974 }
6975 
6976 #endif // #if VMA_STATS_STRING_ENABLED
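
// Illustrative sketch (editor's addition, not part of the original library):
// BeginValue() above enforces JSON well-formedness inside an object - every
// even-numbered value must be a string key (hence VMA_ASSERT(isString)) and
// each odd-numbered value is preceded by ": ". A minimal use, assuming
// VMA_STATS_STRING_ENABLED is nonzero and `allocator` is a valid VmaAllocator:
//
//   VmaStringBuilder sb(allocator);
//   {
//       VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Blocks");   // key:   valueCount even -> must be a string
//       json.WriteNumber(1u);         // value: valueCount odd  -> preceded by ": "
//       json.EndObject();
//   } // ~VmaJsonWriter asserts the stack unwound to empty.
//   // sb now holds roughly: {\n  "Blocks": 1\n}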
6977 
6979 
6980 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6981 {
6982  if(IsUserDataString())
6983  {
6984  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6985 
6986  FreeUserDataString(hAllocator);
6987 
6988  if(pUserData != VMA_NULL)
6989  {
6990  const char* const newStrSrc = (char*)pUserData;
6991  const size_t newStrLen = strlen(newStrSrc);
6992  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6993  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6994  m_pUserData = newStrDst;
6995  }
6996  }
6997  else
6998  {
6999  m_pUserData = pUserData;
7000  }
7001 }
7002 
7003 void VmaAllocation_T::ChangeBlockAllocation(
7004  VmaAllocator hAllocator,
7005  VmaDeviceMemoryBlock* block,
7006  VkDeviceSize offset)
7007 {
7008  VMA_ASSERT(block != VMA_NULL);
7009  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7010 
7011  // Move mapping reference counter from old block to new block.
7012  if(block != m_BlockAllocation.m_Block)
7013  {
7014  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7015  if(IsPersistentMap())
7016  ++mapRefCount;
7017  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7018  block->Map(hAllocator, mapRefCount, VMA_NULL);
7019  }
7020 
7021  m_BlockAllocation.m_Block = block;
7022  m_BlockAllocation.m_Offset = offset;
7023 }
7024 
7025 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7026 {
7027  VMA_ASSERT(newSize > 0);
7028  m_Size = newSize;
7029 }
7030 
7031 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7032 {
7033  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7034  m_BlockAllocation.m_Offset = newOffset;
7035 }
7036 
7037 VkDeviceSize VmaAllocation_T::GetOffset() const
7038 {
7039  switch(m_Type)
7040  {
7041  case ALLOCATION_TYPE_BLOCK:
7042  return m_BlockAllocation.m_Offset;
7043  case ALLOCATION_TYPE_DEDICATED:
7044  return 0;
7045  default:
7046  VMA_ASSERT(0);
7047  return 0;
7048  }
7049 }
7050 
7051 VkDeviceMemory VmaAllocation_T::GetMemory() const
7052 {
7053  switch(m_Type)
7054  {
7055  case ALLOCATION_TYPE_BLOCK:
7056  return m_BlockAllocation.m_Block->GetDeviceMemory();
7057  case ALLOCATION_TYPE_DEDICATED:
7058  return m_DedicatedAllocation.m_hMemory;
7059  default:
7060  VMA_ASSERT(0);
7061  return VK_NULL_HANDLE;
7062  }
7063 }
7064 
7065 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7066 {
7067  switch(m_Type)
7068  {
7069  case ALLOCATION_TYPE_BLOCK:
7070  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7071  case ALLOCATION_TYPE_DEDICATED:
7072  return m_DedicatedAllocation.m_MemoryTypeIndex;
7073  default:
7074  VMA_ASSERT(0);
7075  return UINT32_MAX;
7076  }
7077 }
7078 
7079 void* VmaAllocation_T::GetMappedData() const
7080 {
7081  switch(m_Type)
7082  {
7083  case ALLOCATION_TYPE_BLOCK:
7084  if(m_MapCount != 0)
7085  {
7086  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7087  VMA_ASSERT(pBlockData != VMA_NULL);
7088  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7089  }
7090  else
7091  {
7092  return VMA_NULL;
7093  }
7094  break;
7095  case ALLOCATION_TYPE_DEDICATED:
7096  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7097  return m_DedicatedAllocation.m_pMappedData;
7098  default:
7099  VMA_ASSERT(0);
7100  return VMA_NULL;
7101  }
7102 }
7103 
7104 bool VmaAllocation_T::CanBecomeLost() const
7105 {
7106  switch(m_Type)
7107  {
7108  case ALLOCATION_TYPE_BLOCK:
7109  return m_BlockAllocation.m_CanBecomeLost;
7110  case ALLOCATION_TYPE_DEDICATED:
7111  return false;
7112  default:
7113  VMA_ASSERT(0);
7114  return false;
7115  }
7116 }
7117 
7118 VmaPool VmaAllocation_T::GetPool() const
7119 {
7120  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7121  return m_BlockAllocation.m_hPool;
7122 }
7123 
7124 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7125 {
7126  VMA_ASSERT(CanBecomeLost());
7127 
7128  /*
7129  Warning: This is a carefully designed algorithm.
7130  Do not modify unless you really know what you're doing :)
7131  */
7132  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7133  for(;;)
7134  {
7135  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7136  {
7137  VMA_ASSERT(0);
7138  return false;
7139  }
7140  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7141  {
7142  return false;
7143  }
7144  else // Last use time earlier than current time.
7145  {
7146  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7147  {
7148  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7149  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7150  return true;
7151  }
7152  }
7153  }
7154 }
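
// Illustrative sketch (editor's addition, not part of the original library):
// MakeLost() above is an instance of the classic compare-exchange retry loop.
// Its generic shape, with hypothetical names, is sketched below; note that a
// failed compare_exchange_weak refreshes `observed`, so every retry re-checks
// the preconditions against the newest published value:
//
//   template<typename T, typename Pred, typename Next>
//   bool AtomicTransition(std::atomic<T>& value, Pred canTransition, Next nextValue)
//   {
//       T observed = value.load();
//       for(;;)
//       {
//           if(!canTransition(observed))
//               return false; // Precondition failed against a fresh value.
//           if(value.compare_exchange_weak(observed, nextValue(observed)))
//               return true;  // We won the race; the transition is published.
//           // compare_exchange_weak wrote the current value into `observed`; retry.
//       }
//   }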
7155 
7156 #if VMA_STATS_STRING_ENABLED
7157 
7158 // Names correspond to values of enum VmaSuballocationType.
7159 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7160  "FREE",
7161  "UNKNOWN",
7162  "BUFFER",
7163  "IMAGE_UNKNOWN",
7164  "IMAGE_LINEAR",
7165  "IMAGE_OPTIMAL",
7166 };
7167 
7168 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7169 {
7170  json.WriteString("Type");
7171  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7172 
7173  json.WriteString("Size");
7174  json.WriteNumber(m_Size);
7175 
7176  if(m_pUserData != VMA_NULL)
7177  {
7178  json.WriteString("UserData");
7179  if(IsUserDataString())
7180  {
7181  json.WriteString((const char*)m_pUserData);
7182  }
7183  else
7184  {
7185  json.BeginString();
7186  json.ContinueString_Pointer(m_pUserData);
7187  json.EndString();
7188  }
7189  }
7190 
7191  json.WriteString("CreationFrameIndex");
7192  json.WriteNumber(m_CreationFrameIndex);
7193 
7194  json.WriteString("LastUseFrameIndex");
7195  json.WriteNumber(GetLastUseFrameIndex());
7196 
7197  if(m_BufferImageUsage != 0)
7198  {
7199  json.WriteString("Usage");
7200  json.WriteNumber(m_BufferImageUsage);
7201  }
7202 }
7203 
7204 #endif
7205 
7206 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7207 {
7208  VMA_ASSERT(IsUserDataString());
7209  if(m_pUserData != VMA_NULL)
7210  {
7211  char* const oldStr = (char*)m_pUserData;
7212  const size_t oldStrLen = strlen(oldStr);
7213  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7214  m_pUserData = VMA_NULL;
7215  }
7216 }
7217 
7218 void VmaAllocation_T::BlockAllocMap()
7219 {
7220  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7221 
7222  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7223  {
7224  ++m_MapCount;
7225  }
7226  else
7227  {
7228  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7229  }
7230 }
7231 
7232 void VmaAllocation_T::BlockAllocUnmap()
7233 {
7234  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7235 
7236  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7237  {
7238  --m_MapCount;
7239  }
7240  else
7241  {
7242  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7243  }
7244 }
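
// Editor's note (not part of the original library): m_MapCount packs two pieces
// of state into one counter, which is why BlockAllocMap/BlockAllocUnmap above
// mask with ~MAP_COUNT_FLAG_PERSISTENT_MAP and cap the count at 0x7F:
//
//   m_MapCount bit layout:  [P][ccccccc]
//     P       - MAP_COUNT_FLAG_PERSISTENT_MAP: created persistently mapped.
//     ccccccc - 7-bit reference count of explicit Map()/Unmap() pairs (max 127).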
7245 
7246 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7247 {
7248  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7249 
7250  if(m_MapCount != 0)
7251  {
7252  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7253  {
7254  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7255  *ppData = m_DedicatedAllocation.m_pMappedData;
7256  ++m_MapCount;
7257  return VK_SUCCESS;
7258  }
7259  else
7260  {
7261  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7262  return VK_ERROR_MEMORY_MAP_FAILED;
7263  }
7264  }
7265  else
7266  {
7267  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7268  hAllocator->m_hDevice,
7269  m_DedicatedAllocation.m_hMemory,
7270  0, // offset
7271  VK_WHOLE_SIZE,
7272  0, // flags
7273  ppData);
7274  if(result == VK_SUCCESS)
7275  {
7276  m_DedicatedAllocation.m_pMappedData = *ppData;
7277  m_MapCount = 1;
7278  }
7279  return result;
7280  }
7281 }
7282 
7283 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7284 {
7285  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7286 
7287  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7288  {
7289  --m_MapCount;
7290  if(m_MapCount == 0)
7291  {
7292  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7293  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7294  hAllocator->m_hDevice,
7295  m_DedicatedAllocation.m_hMemory);
7296  }
7297  }
7298  else
7299  {
7300  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7301  }
7302 }
7303 
7304 #if VMA_STATS_STRING_ENABLED
7305 
7306 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7307 {
7308  json.BeginObject();
7309 
7310  json.WriteString("Blocks");
7311  json.WriteNumber(stat.blockCount);
7312 
7313  json.WriteString("Allocations");
7314  json.WriteNumber(stat.allocationCount);
7315 
7316  json.WriteString("UnusedRanges");
7317  json.WriteNumber(stat.unusedRangeCount);
7318 
7319  json.WriteString("UsedBytes");
7320  json.WriteNumber(stat.usedBytes);
7321 
7322  json.WriteString("UnusedBytes");
7323  json.WriteNumber(stat.unusedBytes);
7324 
7325  if(stat.allocationCount > 1)
7326  {
7327  json.WriteString("AllocationSize");
7328  json.BeginObject(true);
7329  json.WriteString("Min");
7330  json.WriteNumber(stat.allocationSizeMin);
7331  json.WriteString("Avg");
7332  json.WriteNumber(stat.allocationSizeAvg);
7333  json.WriteString("Max");
7334  json.WriteNumber(stat.allocationSizeMax);
7335  json.EndObject();
7336  }
7337 
7338  if(stat.unusedRangeCount > 1)
7339  {
7340  json.WriteString("UnusedRangeSize");
7341  json.BeginObject(true);
7342  json.WriteString("Min");
7343  json.WriteNumber(stat.unusedRangeSizeMin);
7344  json.WriteString("Avg");
7345  json.WriteNumber(stat.unusedRangeSizeAvg);
7346  json.WriteString("Max");
7347  json.WriteNumber(stat.unusedRangeSizeMax);
7348  json.EndObject();
7349  }
7350 
7351  json.EndObject();
7352 }
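
// Illustrative sketch (editor's addition, not part of the original library): for
// a VmaStatInfo describing one block with two allocations, the function above
// emits JSON shaped roughly like:
//
//   {
//     "Blocks": 1,
//     "Allocations": 2,
//     "UnusedRanges": 1,
//     "UsedBytes": 1024,
//     "UnusedBytes": 512,
//     "AllocationSize": { "Min": 256, "Avg": 512, "Max": 768 }
//   }
//
// The "AllocationSize" and "UnusedRangeSize" sub-objects are written only when
// the corresponding count exceeds 1, since min/avg/max add no information for a
// single element.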
7353 
7354 #endif // #if VMA_STATS_STRING_ENABLED
7355 
7356 struct VmaSuballocationItemSizeLess
7357 {
7358  bool operator()(
7359  const VmaSuballocationList::iterator lhs,
7360  const VmaSuballocationList::iterator rhs) const
7361  {
7362  return lhs->size < rhs->size;
7363  }
7364  bool operator()(
7365  const VmaSuballocationList::iterator lhs,
7366  VkDeviceSize rhsSize) const
7367  {
7368  return lhs->size < rhsSize;
7369  }
7370 };
7371 
7372 
7373 ////////////////////////////////////////////////////////////////////////////////
7374 // class VmaBlockMetadata
7375 
7376 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7377  m_Size(0),
7378  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7379 {
7380 }
7381 
7382 #if VMA_STATS_STRING_ENABLED
7383 
7384 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7385  VkDeviceSize unusedBytes,
7386  size_t allocationCount,
7387  size_t unusedRangeCount) const
7388 {
7389  json.BeginObject();
7390 
7391  json.WriteString("TotalBytes");
7392  json.WriteNumber(GetSize());
7393 
7394  json.WriteString("UnusedBytes");
7395  json.WriteNumber(unusedBytes);
7396 
7397  json.WriteString("Allocations");
7398  json.WriteNumber((uint64_t)allocationCount);
7399 
7400  json.WriteString("UnusedRanges");
7401  json.WriteNumber((uint64_t)unusedRangeCount);
7402 
7403  json.WriteString("Suballocations");
7404  json.BeginArray();
7405 }
7406 
7407 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7408  VkDeviceSize offset,
7409  VmaAllocation hAllocation) const
7410 {
7411  json.BeginObject(true);
7412 
7413  json.WriteString("Offset");
7414  json.WriteNumber(offset);
7415 
7416  hAllocation->PrintParameters(json);
7417 
7418  json.EndObject();
7419 }
7420 
7421 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7422  VkDeviceSize offset,
7423  VkDeviceSize size) const
7424 {
7425  json.BeginObject(true);
7426 
7427  json.WriteString("Offset");
7428  json.WriteNumber(offset);
7429 
7430  json.WriteString("Type");
7431  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7432 
7433  json.WriteString("Size");
7434  json.WriteNumber(size);
7435 
7436  json.EndObject();
7437 }
7438 
7439 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7440 {
7441  json.EndArray();
7442  json.EndObject();
7443 }
7444 
7445 #endif // #if VMA_STATS_STRING_ENABLED
7446 
7447 ////////////////////////////////////////////////////////////////////////////////
7448 // class VmaBlockMetadata_Generic
7449 
7450 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7451  VmaBlockMetadata(hAllocator),
7452  m_FreeCount(0),
7453  m_SumFreeSize(0),
7454  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7455  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7456 {
7457 }
7458 
7459 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7460 {
7461 }
7462 
7463 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7464 {
7465  VmaBlockMetadata::Init(size);
7466 
7467  m_FreeCount = 1;
7468  m_SumFreeSize = size;
7469 
7470  VmaSuballocation suballoc = {};
7471  suballoc.offset = 0;
7472  suballoc.size = size;
7473  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7474  suballoc.hAllocation = VK_NULL_HANDLE;
7475 
7476  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7477  m_Suballocations.push_back(suballoc);
7478  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7479  --suballocItem;
7480  m_FreeSuballocationsBySize.push_back(suballocItem);
7481 }
7482 
7483 bool VmaBlockMetadata_Generic::Validate() const
7484 {
7485  VMA_VALIDATE(!m_Suballocations.empty());
7486 
7487  // Expected offset of new suballocation as calculated from previous ones.
7488  VkDeviceSize calculatedOffset = 0;
7489  // Expected number of free suballocations as calculated from traversing their list.
7490  uint32_t calculatedFreeCount = 0;
7491  // Expected sum size of free suballocations as calculated from traversing their list.
7492  VkDeviceSize calculatedSumFreeSize = 0;
7493  // Expected number of free suballocations that should be registered in
7494  // m_FreeSuballocationsBySize calculated from traversing their list.
7495  size_t freeSuballocationsToRegister = 0;
7496  // True if previous visited suballocation was free.
7497  bool prevFree = false;
7498 
7499  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7500  suballocItem != m_Suballocations.cend();
7501  ++suballocItem)
7502  {
7503  const VmaSuballocation& subAlloc = *suballocItem;
7504 
7505  // Actual offset of this suballocation doesn't match expected one.
7506  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7507 
7508  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7509  // Two adjacent free suballocations are invalid. They should be merged.
7510  VMA_VALIDATE(!prevFree || !currFree);
7511 
7512  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7513 
7514  if(currFree)
7515  {
7516  calculatedSumFreeSize += subAlloc.size;
7517  ++calculatedFreeCount;
7518  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7519  {
7520  ++freeSuballocationsToRegister;
7521  }
7522 
7523  // Margin required between allocations - every free range must be at least that large.
7524  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7525  }
7526  else
7527  {
7528  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7529  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7530 
7531  // Margin required between allocations - previous allocation must be free.
7532  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7533  }
7534 
7535  calculatedOffset += subAlloc.size;
7536  prevFree = currFree;
7537  }
7538 
7539  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7540  // match expected one.
7541  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7542 
7543  VkDeviceSize lastSize = 0;
7544  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7545  {
7546  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7547 
7548  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7549  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7550  // They must be sorted by size ascending.
7551  VMA_VALIDATE(suballocItem->size >= lastSize);
7552 
7553  lastSize = suballocItem->size;
7554  }
7555 
7556  // Check if totals match calculated values.
7557  VMA_VALIDATE(ValidateFreeSuballocationList());
7558  VMA_VALIDATE(calculatedOffset == GetSize());
7559  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7560  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7561 
7562  return true;
7563 }
7564 
7565 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7566 {
7567  if(!m_FreeSuballocationsBySize.empty())
7568  {
7569  return m_FreeSuballocationsBySize.back()->size;
7570  }
7571  else
7572  {
7573  return 0;
7574  }
7575 }
7576 
7577 bool VmaBlockMetadata_Generic::IsEmpty() const
7578 {
7579  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7580 }
7581 
7582 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7583 {
7584  outInfo.blockCount = 1;
7585 
7586  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7587  outInfo.allocationCount = rangeCount - m_FreeCount;
7588  outInfo.unusedRangeCount = m_FreeCount;
7589 
7590  outInfo.unusedBytes = m_SumFreeSize;
7591  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7592 
7593  outInfo.allocationSizeMin = UINT64_MAX;
7594  outInfo.allocationSizeMax = 0;
7595  outInfo.unusedRangeSizeMin = UINT64_MAX;
7596  outInfo.unusedRangeSizeMax = 0;
7597 
7598  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7599  suballocItem != m_Suballocations.cend();
7600  ++suballocItem)
7601  {
7602  const VmaSuballocation& suballoc = *suballocItem;
7603  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7604  {
7605  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7606  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7607  }
7608  else
7609  {
7610  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7611  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7612  }
7613  }
7614 }
7615 
7616 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7617 {
7618  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7619 
7620  inoutStats.size += GetSize();
7621  inoutStats.unusedSize += m_SumFreeSize;
7622  inoutStats.allocationCount += rangeCount - m_FreeCount;
7623  inoutStats.unusedRangeCount += m_FreeCount;
7624  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7625 }
7626 
7627 #if VMA_STATS_STRING_ENABLED
7628 
7629 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7630 {
7631  PrintDetailedMap_Begin(json,
7632  m_SumFreeSize, // unusedBytes
7633  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7634  m_FreeCount); // unusedRangeCount
7635 
7636  size_t i = 0;
7637  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7638  suballocItem != m_Suballocations.cend();
7639  ++suballocItem, ++i)
7640  {
7641  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7642  {
7643  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7644  }
7645  else
7646  {
7647  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7648  }
7649  }
7650 
7651  PrintDetailedMap_End(json);
7652 }
7653 
7654 #endif // #if VMA_STATS_STRING_ENABLED
7655 
7656 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7657  uint32_t currentFrameIndex,
7658  uint32_t frameInUseCount,
7659  VkDeviceSize bufferImageGranularity,
7660  VkDeviceSize allocSize,
7661  VkDeviceSize allocAlignment,
7662  bool upperAddress,
7663  VmaSuballocationType allocType,
7664  bool canMakeOtherLost,
7665  uint32_t strategy,
7666  VmaAllocationRequest* pAllocationRequest)
7667 {
7668  VMA_ASSERT(allocSize > 0);
7669  VMA_ASSERT(!upperAddress);
7670  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7671  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7672  VMA_HEAVY_ASSERT(Validate());
7673 
7674  // There is not enough total free space in this block to fulfill the request: Early return.
7675  if(canMakeOtherLost == false &&
7676  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7677  {
7678  return false;
7679  }
7680 
7681  // Search m_FreeSuballocationsBySize (sorted by size) according to the requested strategy.
7682  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7683  if(freeSuballocCount > 0)
7684  {
7685  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7686  {
7687  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7688  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7689  m_FreeSuballocationsBySize.data(),
7690  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7691  allocSize + 2 * VMA_DEBUG_MARGIN,
7692  VmaSuballocationItemSizeLess());
7693  size_t index = it - m_FreeSuballocationsBySize.data();
7694  for(; index < freeSuballocCount; ++index)
7695  {
7696  if(CheckAllocation(
7697  currentFrameIndex,
7698  frameInUseCount,
7699  bufferImageGranularity,
7700  allocSize,
7701  allocAlignment,
7702  allocType,
7703  m_FreeSuballocationsBySize[index],
7704  false, // canMakeOtherLost
7705  &pAllocationRequest->offset,
7706  &pAllocationRequest->itemsToMakeLostCount,
7707  &pAllocationRequest->sumFreeSize,
7708  &pAllocationRequest->sumItemSize))
7709  {
7710  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7711  return true;
7712  }
7713  }
7714  }
7715  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7716  {
7717  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7718  it != m_Suballocations.end();
7719  ++it)
7720  {
7721  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7722  currentFrameIndex,
7723  frameInUseCount,
7724  bufferImageGranularity,
7725  allocSize,
7726  allocAlignment,
7727  allocType,
7728  it,
7729  false, // canMakeOtherLost
7730  &pAllocationRequest->offset,
7731  &pAllocationRequest->itemsToMakeLostCount,
7732  &pAllocationRequest->sumFreeSize,
7733  &pAllocationRequest->sumItemSize))
7734  {
7735  pAllocationRequest->item = it;
7736  return true;
7737  }
7738  }
7739  }
7740  else // WORST_FIT, FIRST_FIT
7741  {
7742  // Search starting from the biggest suballocations.
7743  for(size_t index = freeSuballocCount; index--; )
7744  {
7745  if(CheckAllocation(
7746  currentFrameIndex,
7747  frameInUseCount,
7748  bufferImageGranularity,
7749  allocSize,
7750  allocAlignment,
7751  allocType,
7752  m_FreeSuballocationsBySize[index],
7753  false, // canMakeOtherLost
7754  &pAllocationRequest->offset,
7755  &pAllocationRequest->itemsToMakeLostCount,
7756  &pAllocationRequest->sumFreeSize,
7757  &pAllocationRequest->sumItemSize))
7758  {
7759  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7760  return true;
7761  }
7762  }
7763  }
7764  }
7765 
7766  if(canMakeOtherLost)
7767  {
7768  // Brute-force algorithm. TODO: Come up with something better.
7769 
7770  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7771  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7772 
7773  VmaAllocationRequest tmpAllocRequest = {};
7774  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7775  suballocIt != m_Suballocations.end();
7776  ++suballocIt)
7777  {
7778  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7779  suballocIt->hAllocation->CanBecomeLost())
7780  {
7781  if(CheckAllocation(
7782  currentFrameIndex,
7783  frameInUseCount,
7784  bufferImageGranularity,
7785  allocSize,
7786  allocAlignment,
7787  allocType,
7788  suballocIt,
7789  canMakeOtherLost,
7790  &tmpAllocRequest.offset,
7791  &tmpAllocRequest.itemsToMakeLostCount,
7792  &tmpAllocRequest.sumFreeSize,
7793  &tmpAllocRequest.sumItemSize))
7794  {
7795  tmpAllocRequest.item = suballocIt;
7796 
7797  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7798  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7799  {
7800  *pAllocationRequest = tmpAllocRequest;
7801  }
7802  }
7803  }
7804  }
7805 
7806  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7807  {
7808  return true;
7809  }
7810  }
7811 
7812  return false;
7813 }
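
// Editor's note (not part of the original library): the best-fit branch above
// relies on m_FreeSuballocationsBySize being sorted by size ascending, so
// VmaBinaryFindFirstNotLess behaves like std::lower_bound with the same
// comparator:
//
//   VmaSuballocationList::iterator* const it = std::lower_bound(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + freeSuballocCount,
//       allocSize + 2 * VMA_DEBUG_MARGIN,
//       VmaSuballocationItemSizeLess()); // uses operator()(iterator, VkDeviceSize)
//
// Every candidate from `it` onward is large enough by raw size; CheckAllocation
// then verifies alignment, debug margins, and bufferImageGranularity for each.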
7814 
7815 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7816  uint32_t currentFrameIndex,
7817  uint32_t frameInUseCount,
7818  VmaAllocationRequest* pAllocationRequest)
7819 {
7820  while(pAllocationRequest->itemsToMakeLostCount > 0)
7821  {
7822  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7823  {
7824  ++pAllocationRequest->item;
7825  }
7826  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7827  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7828  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7829  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7830  {
7831  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7832  --pAllocationRequest->itemsToMakeLostCount;
7833  }
7834  else
7835  {
7836  return false;
7837  }
7838  }
7839 
7840  VMA_HEAVY_ASSERT(Validate());
7841  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7842  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7843 
7844  return true;
7845 }
7846 
7847 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7848 {
7849  uint32_t lostAllocationCount = 0;
7850  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7851  it != m_Suballocations.end();
7852  ++it)
7853  {
7854  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7855  it->hAllocation->CanBecomeLost() &&
7856  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7857  {
7858  it = FreeSuballocation(it);
7859  ++lostAllocationCount;
7860  }
7861  }
7862  return lostAllocationCount;
7863 }
7864 
7865 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7866 {
7867  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7868  it != m_Suballocations.end();
7869  ++it)
7870  {
7871  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7872  {
7873  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7874  {
7875  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7876  return VK_ERROR_VALIDATION_FAILED_EXT;
7877  }
7878  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7879  {
7880  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7881  return VK_ERROR_VALIDATION_FAILED_EXT;
7882  }
7883  }
7884  }
7885 
7886  return VK_SUCCESS;
7887 }
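
// Editor's note (not part of the original library): with VMA_DEBUG_MARGIN
// enabled, every used suballocation is preceded and followed by a margin
// carrying a magic value, so the layout verified above is:
//
//   ...[ free / margin |MAGIC| allocation data |MAGIC| free / margin ]...
//                      ^                       ^
//        offset - VMA_DEBUG_MARGIN             offset + size
//
// A corrupted magic value before an allocation suggests an underrun by that
// allocation (or an overrun by its predecessor); a corrupted value after it
// suggests an overrun.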
7888 
7889 void VmaBlockMetadata_Generic::Alloc(
7890  const VmaAllocationRequest& request,
7891  VmaSuballocationType type,
7892  VkDeviceSize allocSize,
7893  bool upperAddress,
7894  VmaAllocation hAllocation)
7895 {
7896  VMA_ASSERT(!upperAddress);
7897  VMA_ASSERT(request.item != m_Suballocations.end());
7898  VmaSuballocation& suballoc = *request.item;
7899  // Given suballocation is a free block.
7900  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7901  // Given offset is inside this suballocation.
7902  VMA_ASSERT(request.offset >= suballoc.offset);
7903  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7904  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7905  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7906 
7907  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7908  // it to become used.
7909  UnregisterFreeSuballocation(request.item);
7910 
7911  suballoc.offset = request.offset;
7912  suballoc.size = allocSize;
7913  suballoc.type = type;
7914  suballoc.hAllocation = hAllocation;
7915 
7916  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7917  if(paddingEnd)
7918  {
7919  VmaSuballocation paddingSuballoc = {};
7920  paddingSuballoc.offset = request.offset + allocSize;
7921  paddingSuballoc.size = paddingEnd;
7922  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7923  VmaSuballocationList::iterator next = request.item;
7924  ++next;
7925  const VmaSuballocationList::iterator paddingEndItem =
7926  m_Suballocations.insert(next, paddingSuballoc);
7927  RegisterFreeSuballocation(paddingEndItem);
7928  }
7929 
7930  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7931  if(paddingBegin)
7932  {
7933  VmaSuballocation paddingSuballoc = {};
7934  paddingSuballoc.offset = request.offset - paddingBegin;
7935  paddingSuballoc.size = paddingBegin;
7936  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7937  const VmaSuballocationList::iterator paddingBeginItem =
7938  m_Suballocations.insert(request.item, paddingSuballoc);
7939  RegisterFreeSuballocation(paddingBeginItem);
7940  }
7941 
7942  // Update totals.
7943  m_FreeCount = m_FreeCount - 1;
7944  if(paddingBegin > 0)
7945  {
7946  ++m_FreeCount;
7947  }
7948  if(paddingEnd > 0)
7949  {
7950  ++m_FreeCount;
7951  }
7952  m_SumFreeSize -= allocSize;
7953 }
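
// Editor's note (not part of the original library): Alloc() above carves the
// request out of a single free suballocation, splitting it into up to three
// pieces:
//
//   before:  [                 free (request.item)                 ]
//   after:   [ paddingBegin ][      allocation      ][ paddingEnd  ]
//
// paddingBegin absorbs alignment and debug-margin slack before request.offset;
// paddingEnd is whatever remains. Each non-empty padding becomes a new FREE
// suballocation re-registered in m_FreeSuballocationsBySize, so m_FreeCount can
// grow by up to one per side while m_SumFreeSize drops by exactly allocSize.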
7954 
7955 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7956 {
7957  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7958  suballocItem != m_Suballocations.end();
7959  ++suballocItem)
7960  {
7961  VmaSuballocation& suballoc = *suballocItem;
7962  if(suballoc.hAllocation == allocation)
7963  {
7964  FreeSuballocation(suballocItem);
7965  VMA_HEAVY_ASSERT(Validate());
7966  return;
7967  }
7968  }
7969  VMA_ASSERT(0 && "Not found!");
7970 }
7971 
7972 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7973 {
7974  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7975  suballocItem != m_Suballocations.end();
7976  ++suballocItem)
7977  {
7978  VmaSuballocation& suballoc = *suballocItem;
7979  if(suballoc.offset == offset)
7980  {
7981  FreeSuballocation(suballocItem);
7982  return;
7983  }
7984  }
7985  VMA_ASSERT(0 && "Not found!");
7986 }
7987 
7988 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
7989 {
7990  typedef VmaSuballocationList::iterator iter_type;
7991  for(iter_type suballocItem = m_Suballocations.begin();
7992  suballocItem != m_Suballocations.end();
7993  ++suballocItem)
7994  {
7995  VmaSuballocation& suballoc = *suballocItem;
7996  if(suballoc.hAllocation == alloc)
7997  {
7998  iter_type nextItem = suballocItem;
7999  ++nextItem;
8000 
8001  // Should have been ensured on a higher level.
8002  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8003 
8004  // Shrinking.
8005  if(newSize < alloc->GetSize())
8006  {
8007  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8008 
8009  // There is next item.
8010  if(nextItem != m_Suballocations.end())
8011  {
8012  // Next item is free.
8013  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8014  {
8015  // Grow this next item backward.
8016  UnregisterFreeSuballocation(nextItem);
8017  nextItem->offset -= sizeDiff;
8018  nextItem->size += sizeDiff;
8019  RegisterFreeSuballocation(nextItem);
8020  }
8021  // Next item is not free.
8022  else
8023  {
8024  // Create free item after current one.
8025  VmaSuballocation newFreeSuballoc;
8026  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8027  newFreeSuballoc.offset = suballoc.offset + newSize;
8028  newFreeSuballoc.size = sizeDiff;
8029  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8030  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8031  RegisterFreeSuballocation(newFreeSuballocIt);
8032 
8033  ++m_FreeCount;
8034  }
8035  }
8036  // This is the last item.
8037  else
8038  {
8039  // Create free item at the end.
8040  VmaSuballocation newFreeSuballoc;
8041  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8042  newFreeSuballoc.offset = suballoc.offset + newSize;
8043  newFreeSuballoc.size = sizeDiff;
8044  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8045  m_Suballocations.push_back(newFreeSuballoc);
8046 
8047  iter_type newFreeSuballocIt = m_Suballocations.end();
8048  RegisterFreeSuballocation(--newFreeSuballocIt);
8049 
8050  ++m_FreeCount;
8051  }
8052 
8053  suballoc.size = newSize;
8054  m_SumFreeSize += sizeDiff;
8055  }
8056  // Growing.
8057  else
8058  {
8059  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8060 
8061  // There is next item.
8062  if(nextItem != m_Suballocations.end())
8063  {
8064  // Next item is free.
8065  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8066  {
8067  // There is not enough free space, including margin.
8068  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8069  {
8070  return false;
8071  }
8072 
8073  // There is more free space than required.
8074  if(nextItem->size > sizeDiff)
8075  {
8076  // Move and shrink this next item.
8077  UnregisterFreeSuballocation(nextItem);
8078  nextItem->offset += sizeDiff;
8079  nextItem->size -= sizeDiff;
8080  RegisterFreeSuballocation(nextItem);
8081  }
8082  // There is exactly the amount of free space required.
8083  else
8084  {
8085  // Remove this next free item.
8086  UnregisterFreeSuballocation(nextItem);
8087  m_Suballocations.erase(nextItem);
8088  --m_FreeCount;
8089  }
8090  }
8091  // Next item is not free - there is no space to grow.
8092  else
8093  {
8094  return false;
8095  }
8096  }
8097  // This is the last item - there is no space to grow.
8098  else
8099  {
8100  return false;
8101  }
8102 
8103  suballoc.size = newSize;
8104  m_SumFreeSize -= sizeDiff;
8105  }
8106 
8107  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8108  return true;
8109  }
8110  }
8111  VMA_ASSERT(0 && "Not found!");
8112  return false;
8113 }
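
// Editor's note (not part of the original library): ResizeAllocation above only
// moves the boundary between the allocation and the free range immediately
// following it; the allocation's offset never changes:
//
//   shrink:  [ allocation      ][ free ... ]  ->  [ allocation ][ free ...      ]
//   grow:    [ allocation ][ free ...      ]  ->  [ allocation      ][ free ... ]
//
// Growing fails when the next suballocation is used, when the allocation is the
// last item in the block, or when the following free range is smaller than
// sizeDiff + VMA_DEBUG_MARGIN.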
8114 
8115 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8116 {
8117  VkDeviceSize lastSize = 0;
8118  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8119  {
8120  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8121 
8122  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8123  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8124  VMA_VALIDATE(it->size >= lastSize);
8125  lastSize = it->size;
8126  }
8127  return true;
8128 }
8129 
8130 bool VmaBlockMetadata_Generic::CheckAllocation(
8131  uint32_t currentFrameIndex,
8132  uint32_t frameInUseCount,
8133  VkDeviceSize bufferImageGranularity,
8134  VkDeviceSize allocSize,
8135  VkDeviceSize allocAlignment,
8136  VmaSuballocationType allocType,
8137  VmaSuballocationList::const_iterator suballocItem,
8138  bool canMakeOtherLost,
8139  VkDeviceSize* pOffset,
8140  size_t* itemsToMakeLostCount,
8141  VkDeviceSize* pSumFreeSize,
8142  VkDeviceSize* pSumItemSize) const
8143 {
8144  VMA_ASSERT(allocSize > 0);
8145  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8146  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8147  VMA_ASSERT(pOffset != VMA_NULL);
8148 
8149  *itemsToMakeLostCount = 0;
8150  *pSumFreeSize = 0;
8151  *pSumItemSize = 0;
8152 
8153  if(canMakeOtherLost)
8154  {
8155  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8156  {
8157  *pSumFreeSize = suballocItem->size;
8158  }
8159  else
8160  {
8161  if(suballocItem->hAllocation->CanBecomeLost() &&
8162  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8163  {
8164  ++*itemsToMakeLostCount;
8165  *pSumItemSize = suballocItem->size;
8166  }
8167  else
8168  {
8169  return false;
8170  }
8171  }
8172 
8173  // Remaining size is too small for this request: Early return.
8174  if(GetSize() - suballocItem->offset < allocSize)
8175  {
8176  return false;
8177  }
8178 
8179  // Start from offset equal to beginning of this suballocation.
8180  *pOffset = suballocItem->offset;
8181 
8182  // Apply VMA_DEBUG_MARGIN at the beginning.
8183  if(VMA_DEBUG_MARGIN > 0)
8184  {
8185  *pOffset += VMA_DEBUG_MARGIN;
8186  }
8187 
8188  // Apply alignment.
8189  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8190 
8191  // Check previous suballocations for BufferImageGranularity conflicts.
8192  // Make bigger alignment if necessary.
8193  if(bufferImageGranularity > 1)
8194  {
8195  bool bufferImageGranularityConflict = false;
8196  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8197  while(prevSuballocItem != m_Suballocations.cbegin())
8198  {
8199  --prevSuballocItem;
8200  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8201  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8202  {
8203  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8204  {
8205  bufferImageGranularityConflict = true;
8206  break;
8207  }
8208  }
8209  else
8210  // Already on previous page.
8211  break;
8212  }
8213  if(bufferImageGranularityConflict)
8214  {
8215  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8216  }
8217  }
8218 
8219  // Now that we have final *pOffset, check if we are past suballocItem.
8220  // If yes, return false - this function should be called for another suballocItem as starting point.
8221  if(*pOffset >= suballocItem->offset + suballocItem->size)
8222  {
8223  return false;
8224  }
8225 
8226  // Calculate padding at the beginning based on current offset.
8227  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8228 
8229  // Calculate required margin at the end.
8230  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8231 
8232  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8233  // Another early return check.
8234  if(suballocItem->offset + totalSize > GetSize())
8235  {
8236  return false;
8237  }
8238 
8239  // Advance lastSuballocItem until desired size is reached.
8240  // Update itemsToMakeLostCount.
8241  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8242  if(totalSize > suballocItem->size)
8243  {
8244  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8245  while(remainingSize > 0)
8246  {
8247  ++lastSuballocItem;
8248  if(lastSuballocItem == m_Suballocations.cend())
8249  {
8250  return false;
8251  }
8252  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8253  {
8254  *pSumFreeSize += lastSuballocItem->size;
8255  }
8256  else
8257  {
8258  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8259  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8260  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8261  {
8262  ++*itemsToMakeLostCount;
8263  *pSumItemSize += lastSuballocItem->size;
8264  }
8265  else
8266  {
8267  return false;
8268  }
8269  }
8270  remainingSize = (lastSuballocItem->size < remainingSize) ?
8271  remainingSize - lastSuballocItem->size : 0;
8272  }
8273  }
8274 
8275  // Check next suballocations for BufferImageGranularity conflicts.
8276  // If conflict exists, we must mark more allocations lost or fail.
8277  if(bufferImageGranularity > 1)
8278  {
8279  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8280  ++nextSuballocItem;
8281  while(nextSuballocItem != m_Suballocations.cend())
8282  {
8283  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8284  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8285  {
8286  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8287  {
8288  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8289  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8290  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8291  {
8292  ++*itemsToMakeLostCount;
8293  }
8294  else
8295  {
8296  return false;
8297  }
8298  }
8299  }
8300  else
8301  {
8302  // Already on next page.
8303  break;
8304  }
8305  ++nextSuballocItem;
8306  }
8307  }
8308  }
8309  else
8310  {
8311  const VmaSuballocation& suballoc = *suballocItem;
8312  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8313 
8314  *pSumFreeSize = suballoc.size;
8315 
8316  // Size of this suballocation is too small for this request: Early return.
8317  if(suballoc.size < allocSize)
8318  {
8319  return false;
8320  }
8321 
8322  // Start from offset equal to beginning of this suballocation.
8323  *pOffset = suballoc.offset;
8324 
8325  // Apply VMA_DEBUG_MARGIN at the beginning.
8326  if(VMA_DEBUG_MARGIN > 0)
8327  {
8328  *pOffset += VMA_DEBUG_MARGIN;
8329  }
8330 
8331  // Apply alignment.
8332  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8333 
8334  // Check previous suballocations for BufferImageGranularity conflicts.
8335  // Make bigger alignment if necessary.
8336  if(bufferImageGranularity > 1)
8337  {
8338  bool bufferImageGranularityConflict = false;
8339  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8340  while(prevSuballocItem != m_Suballocations.cbegin())
8341  {
8342  --prevSuballocItem;
8343  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8344  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8345  {
8346  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8347  {
8348  bufferImageGranularityConflict = true;
8349  break;
8350  }
8351  }
8352  else
8353  // Already on previous page.
8354  break;
8355  }
8356  if(bufferImageGranularityConflict)
8357  {
8358  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8359  }
8360  }
8361 
8362  // Calculate padding at the beginning based on current offset.
8363  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8364 
8365  // Calculate required margin at the end.
8366  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8367 
8368  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8369  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8370  {
8371  return false;
8372  }
8373 
8374  // Check next suballocations for BufferImageGranularity conflicts.
8375  // If conflict exists, allocation cannot be made here.
8376  if(bufferImageGranularity > 1)
8377  {
8378  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8379  ++nextSuballocItem;
8380  while(nextSuballocItem != m_Suballocations.cend())
8381  {
8382  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8383  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8384  {
8385  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8386  {
8387  return false;
8388  }
8389  }
8390  else
8391  {
8392  // Already on next page.
8393  break;
8394  }
8395  ++nextSuballocItem;
8396  }
8397  }
8398  }
8399 
8400  // All tests passed: Success. pOffset is already filled.
8401  return true;
8402 }
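/*
A worked example of the offset arithmetic in the allocation check above,
assuming the VmaAlignUp() helper defined earlier in this file (the concrete
numbers are illustrative only):

\code
VkDeviceSize offset = 120;                      // suballoc.offset
offset += 4;                                    // VMA_DEBUG_MARGIN == 4 (illustrative)
offset = VmaAlignUp(offset, (VkDeviceSize)64);  // alignment 64 -> offset == 128
const VkDeviceSize paddingBegin = 128 - 120;    // == 8
// The request fits only if paddingBegin + allocSize + VMA_DEBUG_MARGIN does not
// exceed suballoc.size (or, with canMakeOtherLost, the span of following
// suballocations that are free or can be made lost).
\endcode
*/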
8403 
8404 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8405 {
8406  VMA_ASSERT(item != m_Suballocations.end());
8407  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8408 
8409  VmaSuballocationList::iterator nextItem = item;
8410  ++nextItem;
8411  VMA_ASSERT(nextItem != m_Suballocations.end());
8412  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8413 
8414  item->size += nextItem->size;
8415  --m_FreeCount;
8416  m_Suballocations.erase(nextItem);
8417 }
8418 
8419 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8420 {
8421  // Change this suballocation to be marked as free.
8422  VmaSuballocation& suballoc = *suballocItem;
8423  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8424  suballoc.hAllocation = VK_NULL_HANDLE;
8425 
8426  // Update totals.
8427  ++m_FreeCount;
8428  m_SumFreeSize += suballoc.size;
8429 
8430  // Merge with previous and/or next suballocation if it's also free.
8431  bool mergeWithNext = false;
8432  bool mergeWithPrev = false;
8433 
8434  VmaSuballocationList::iterator nextItem = suballocItem;
8435  ++nextItem;
8436  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8437  {
8438  mergeWithNext = true;
8439  }
8440 
8441  VmaSuballocationList::iterator prevItem = suballocItem;
8442  if(suballocItem != m_Suballocations.begin())
8443  {
8444  --prevItem;
8445  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8446  {
8447  mergeWithPrev = true;
8448  }
8449  }
8450 
8451  if(mergeWithNext)
8452  {
8453  UnregisterFreeSuballocation(nextItem);
8454  MergeFreeWithNext(suballocItem);
8455  }
8456 
8457  if(mergeWithPrev)
8458  {
8459  UnregisterFreeSuballocation(prevItem);
8460  MergeFreeWithNext(prevItem);
8461  RegisterFreeSuballocation(prevItem);
8462  return prevItem;
8463  }
8464  else
8465  {
8466  RegisterFreeSuballocation(suballocItem);
8467  return suballocItem;
8468  }
8469 }
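/*
The merge cases handled by FreeSuballocation() above, sketched by position
(F = free, U = used, * = the item being freed):

    U [*] U   -> no merge; the item itself is registered as free
    U [*] F   -> the next item is merged into *, one list node erased
    F [*] U   -> * is merged into the previous item; its iterator is returned
    F [*] F   -> next is merged into * first, then * into the previous item

In every case exactly one iterator to the resulting free suballocation is
re-registered in m_FreeSuballocationsBySize.
*/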
8470 
8471 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8472 {
8473  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8474  VMA_ASSERT(item->size > 0);
8475 
8476  // You may want to enable this validation at the beginning or at the end of
8477  // this function, depending on what you want to check.
8478  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8479 
8480  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8481  {
8482  if(m_FreeSuballocationsBySize.empty())
8483  {
8484  m_FreeSuballocationsBySize.push_back(item);
8485  }
8486  else
8487  {
8488  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8489  }
8490  }
8491 
8492  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8493 }
8494 
8495 
8496 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8497 {
8498  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8499  VMA_ASSERT(item->size > 0);
8500 
8501  // You may want to enable this validation at the beginning or at the end of
8502  // this function, depending on what you want to check.
8503  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8504 
8505  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8506  {
8507  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8508  m_FreeSuballocationsBySize.data(),
8509  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8510  item,
8511  VmaSuballocationItemSizeLess());
8512  for(size_t index = it - m_FreeSuballocationsBySize.data();
8513  index < m_FreeSuballocationsBySize.size();
8514  ++index)
8515  {
8516  if(m_FreeSuballocationsBySize[index] == item)
8517  {
8518  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8519  return;
8520  }
8521  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8522  }
8523  VMA_ASSERT(0 && "Not found.");
8524  }
8525 
8526  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8527 }
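/*
Why UnregisterFreeSuballocation() scans after the binary search:
m_FreeSuballocationsBySize is ordered by size only, so VmaBinaryFindFirstNotLess()
(this file's analogue of std::lower_bound) can only land on the first entry of a
run of equal-sized items; the exact iterator is then found by walking that run.
A rough standard-library equivalent, for illustration only:

\code
// auto it = std::lower_bound(v.begin(), v.end(), item, VmaSuballocationItemSizeLess());
// for(; it != v.end() && (*it)->size == item->size; ++it)
//     if(*it == item) { /* remove *it */ return; }
\endcode
*/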
8528 
8529 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8530  VkDeviceSize bufferImageGranularity,
8531  VmaSuballocationType& inOutPrevSuballocType) const
8532 {
8533  if(bufferImageGranularity == 1 || IsEmpty())
8534  {
8535  return false;
8536  }
8537 
8538  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8539  bool typeConflictFound = false;
8540  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8541  it != m_Suballocations.cend();
8542  ++it)
8543  {
8544  const VmaSuballocationType suballocType = it->type;
8545  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8546  {
8547  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8548  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8549  {
8550  typeConflictFound = true;
8551  }
8552  inOutPrevSuballocType = suballocType;
8553  }
8554  }
8555 
8556  return typeConflictFound || minAlignment >= bufferImageGranularity;
8557 }
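/*
Background for the check above: bufferImageGranularity is the device's
VkPhysicalDeviceLimits::bufferImageGranularity. Linear resources (buffers,
linear-tiled images) and non-linear resources (optimal-tiled images) bound to
the same "page" of that size within one VkDeviceMemory can interfere on some
implementations, so the allocator must keep such mixed neighbors on separate
pages.
*/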
8558 
8559 ////////////////////////////////////////////////////////////////////////////////
8560 // class VmaBlockMetadata_Linear
8561 
8562 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8563  VmaBlockMetadata(hAllocator),
8564  m_SumFreeSize(0),
8565  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8566  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8567  m_1stVectorIndex(0),
8568  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8569  m_1stNullItemsBeginCount(0),
8570  m_1stNullItemsMiddleCount(0),
8571  m_2ndNullItemsCount(0)
8572 {
8573 }
8574 
8575 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8576 {
8577 }
8578 
8579 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8580 {
8581  VmaBlockMetadata::Init(size);
8582  m_SumFreeSize = size;
8583 }
8584 
8585 bool VmaBlockMetadata_Linear::Validate() const
8586 {
8587  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8588  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8589 
8590  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8591  VMA_VALIDATE(!suballocations1st.empty() ||
8592  suballocations2nd.empty() ||
8593  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8594 
8595  if(!suballocations1st.empty())
8596  {
8597  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
8598  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8599  // A null item at the end should have been removed by pop_back().
8600  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8601  }
8602  if(!suballocations2nd.empty())
8603  {
8604  // A null item at the end should have been removed by pop_back().
8605  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8606  }
8607 
8608  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8609  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8610 
8611  VkDeviceSize sumUsedSize = 0;
8612  const size_t suballoc1stCount = suballocations1st.size();
8613  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8614 
8615  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8616  {
8617  const size_t suballoc2ndCount = suballocations2nd.size();
8618  size_t nullItem2ndCount = 0;
8619  for(size_t i = 0; i < suballoc2ndCount; ++i)
8620  {
8621  const VmaSuballocation& suballoc = suballocations2nd[i];
8622  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8623 
8624  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8625  VMA_VALIDATE(suballoc.offset >= offset);
8626 
8627  if(!currFree)
8628  {
8629  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8630  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8631  sumUsedSize += suballoc.size;
8632  }
8633  else
8634  {
8635  ++nullItem2ndCount;
8636  }
8637 
8638  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8639  }
8640 
8641  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8642  }
8643 
8644  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8645  {
8646  const VmaSuballocation& suballoc = suballocations1st[i];
8647  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8648  suballoc.hAllocation == VK_NULL_HANDLE);
8649  }
8650 
8651  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8652 
8653  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8654  {
8655  const VmaSuballocation& suballoc = suballocations1st[i];
8656  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8657 
8658  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8659  VMA_VALIDATE(suballoc.offset >= offset);
8660  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8661 
8662  if(!currFree)
8663  {
8664  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8665  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8666  sumUsedSize += suballoc.size;
8667  }
8668  else
8669  {
8670  ++nullItem1stCount;
8671  }
8672 
8673  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8674  }
8675  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8676 
8677  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8678  {
8679  const size_t suballoc2ndCount = suballocations2nd.size();
8680  size_t nullItem2ndCount = 0;
8681  for(size_t i = suballoc2ndCount; i--; )
8682  {
8683  const VmaSuballocation& suballoc = suballocations2nd[i];
8684  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8685 
8686  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8687  VMA_VALIDATE(suballoc.offset >= offset);
8688 
8689  if(!currFree)
8690  {
8691  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8692  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8693  sumUsedSize += suballoc.size;
8694  }
8695  else
8696  {
8697  ++nullItem2ndCount;
8698  }
8699 
8700  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8701  }
8702 
8703  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8704  }
8705 
8706  VMA_VALIDATE(offset <= GetSize());
8707  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8708 
8709  return true;
8710 }
8711 
8712 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8713 {
8714  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8715  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8716 }
8717 
8718 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8719 {
8720  const VkDeviceSize size = GetSize();
8721 
8722  /*
8723  We don't consider gaps inside allocation vectors with freed allocations because
8724  they are not suitable for reuse in a linear allocator. We consider only space that
8725  is available for new allocations.
8726  */
8727  if(IsEmpty())
8728  {
8729  return size;
8730  }
8731 
8732  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8733 
8734  switch(m_2ndVectorMode)
8735  {
8736  case SECOND_VECTOR_EMPTY:
8737  /*
8738  Available space is after end of 1st, as well as before beginning of 1st (which
8739  would make it a ring buffer).
8740  */
8741  {
8742  const size_t suballocations1stCount = suballocations1st.size();
8743  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8744  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8745  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8746  return VMA_MAX(
8747  firstSuballoc.offset,
8748  size - (lastSuballoc.offset + lastSuballoc.size));
8749  }
8750  break;
8751 
8752  case SECOND_VECTOR_RING_BUFFER:
8753  /*
8754  Available space is only between end of 2nd and beginning of 1st.
8755  */
8756  {
8757  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8758  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8759  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8760  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8761  }
8762  break;
8763 
8764  case SECOND_VECTOR_DOUBLE_STACK:
8765  /*
8766  Available space is only between end of 1st and top of 2nd.
8767  */
8768  {
8769  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8770  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8771  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8772  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8773  }
8774  break;
8775 
8776  default:
8777  VMA_ASSERT(0);
8778  return 0;
8779  }
8780 }
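/*
Block layouts behind the switch above (addresses grow left to right; an
illustrative diagram, not to scale):

  SECOND_VECTOR_EMPTY:        |--free--|##### 1st #####|--------free--------|
  SECOND_VECTOR_RING_BUFFER:  |## 2nd ##|----free----|####### 1st #######|
  SECOND_VECTOR_DOUBLE_STACK: |####### 1st #######|----free----|## 2nd ##|

In ring-buffer mode the 2nd vector holds allocations that wrapped around to the
bottom of the block, so the only space usable for new allocations lies between
its end and the beginning of the 1st vector. In double-stack mode the 2nd
vector grows downward from the top of the block.
*/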
8781 
8782 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8783 {
8784  const VkDeviceSize size = GetSize();
8785  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8786  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8787  const size_t suballoc1stCount = suballocations1st.size();
8788  const size_t suballoc2ndCount = suballocations2nd.size();
8789 
8790  outInfo.blockCount = 1;
8791  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8792  outInfo.unusedRangeCount = 0;
8793  outInfo.usedBytes = 0;
8794  outInfo.allocationSizeMin = UINT64_MAX;
8795  outInfo.allocationSizeMax = 0;
8796  outInfo.unusedRangeSizeMin = UINT64_MAX;
8797  outInfo.unusedRangeSizeMax = 0;
8798 
8799  VkDeviceSize lastOffset = 0;
8800 
8801  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8802  {
8803  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8804  size_t nextAlloc2ndIndex = 0;
8805  while(lastOffset < freeSpace2ndTo1stEnd)
8806  {
8807  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8808  while(nextAlloc2ndIndex < suballoc2ndCount &&
8809  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8810  {
8811  ++nextAlloc2ndIndex;
8812  }
8813 
8814  // Found non-null allocation.
8815  if(nextAlloc2ndIndex < suballoc2ndCount)
8816  {
8817  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8818 
8819  // 1. Process free space before this allocation.
8820  if(lastOffset < suballoc.offset)
8821  {
8822  // There is free space from lastOffset to suballoc.offset.
8823  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8824  ++outInfo.unusedRangeCount;
8825  outInfo.unusedBytes += unusedRangeSize;
8826  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8827  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8828  }
8829 
8830  // 2. Process this allocation.
8831  // There is allocation with suballoc.offset, suballoc.size.
8832  outInfo.usedBytes += suballoc.size;
8833  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8834  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8835 
8836  // 3. Prepare for next iteration.
8837  lastOffset = suballoc.offset + suballoc.size;
8838  ++nextAlloc2ndIndex;
8839  }
8840  // We are at the end.
8841  else
8842  {
8843  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8844  if(lastOffset < freeSpace2ndTo1stEnd)
8845  {
8846  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8847  ++outInfo.unusedRangeCount;
8848  outInfo.unusedBytes += unusedRangeSize;
8849  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8850  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8851  }
8852 
8853  // End of loop.
8854  lastOffset = freeSpace2ndTo1stEnd;
8855  }
8856  }
8857  }
8858 
8859  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8860  const VkDeviceSize freeSpace1stTo2ndEnd =
8861  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8862  while(lastOffset < freeSpace1stTo2ndEnd)
8863  {
8864  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8865  while(nextAlloc1stIndex < suballoc1stCount &&
8866  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8867  {
8868  ++nextAlloc1stIndex;
8869  }
8870 
8871  // Found non-null allocation.
8872  if(nextAlloc1stIndex < suballoc1stCount)
8873  {
8874  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8875 
8876  // 1. Process free space before this allocation.
8877  if(lastOffset < suballoc.offset)
8878  {
8879  // There is free space from lastOffset to suballoc.offset.
8880  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8881  ++outInfo.unusedRangeCount;
8882  outInfo.unusedBytes += unusedRangeSize;
8883  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8884  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8885  }
8886 
8887  // 2. Process this allocation.
8888  // There is allocation with suballoc.offset, suballoc.size.
8889  outInfo.usedBytes += suballoc.size;
8890  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8891  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8892 
8893  // 3. Prepare for next iteration.
8894  lastOffset = suballoc.offset + suballoc.size;
8895  ++nextAlloc1stIndex;
8896  }
8897  // We are at the end.
8898  else
8899  {
8900  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8901  if(lastOffset < freeSpace1stTo2ndEnd)
8902  {
8903  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8904  ++outInfo.unusedRangeCount;
8905  outInfo.unusedBytes += unusedRangeSize;
8906  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8907  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8908  }
8909 
8910  // End of loop.
8911  lastOffset = freeSpace1stTo2ndEnd;
8912  }
8913  }
8914 
8915  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8916  {
8917  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8918  while(lastOffset < size)
8919  {
8920  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
8921  while(nextAlloc2ndIndex != SIZE_MAX &&
8922  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8923  {
8924  --nextAlloc2ndIndex;
8925  }
8926 
8927  // Found non-null allocation.
8928  if(nextAlloc2ndIndex != SIZE_MAX)
8929  {
8930  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8931 
8932  // 1. Process free space before this allocation.
8933  if(lastOffset < suballoc.offset)
8934  {
8935  // There is free space from lastOffset to suballoc.offset.
8936  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8937  ++outInfo.unusedRangeCount;
8938  outInfo.unusedBytes += unusedRangeSize;
8939  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8940  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8941  }
8942 
8943  // 2. Process this allocation.
8944  // There is allocation with suballoc.offset, suballoc.size.
8945  outInfo.usedBytes += suballoc.size;
8946  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8947  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8948 
8949  // 3. Prepare for next iteration.
8950  lastOffset = suballoc.offset + suballoc.size;
8951  --nextAlloc2ndIndex;
8952  }
8953  // We are at the end.
8954  else
8955  {
8956  // There is free space from lastOffset to size.
8957  if(lastOffset < size)
8958  {
8959  const VkDeviceSize unusedRangeSize = size - lastOffset;
8960  ++outInfo.unusedRangeCount;
8961  outInfo.unusedBytes += unusedRangeSize;
8962  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8963  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8964  }
8965 
8966  // End of loop.
8967  lastOffset = size;
8968  }
8969  }
8970  }
8971 
8972  outInfo.unusedBytes = size - outInfo.usedBytes;
8973 }
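/*
CalcAllocationStatInfo() above, AddPoolStats() and PrintDetailedMap() below all
repeat the same three-phase walk in address order: the 2nd vector while it acts
as the wrapped-around part of a ring buffer, then the 1st vector, then the 2nd
vector again while it acts as an upper stack. Only the per-range action differs
(accumulating VmaStatInfo, VmaPoolStats, or JSON output).
*/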
8974 
8975 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8976 {
8977  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8978  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8979  const VkDeviceSize size = GetSize();
8980  const size_t suballoc1stCount = suballocations1st.size();
8981  const size_t suballoc2ndCount = suballocations2nd.size();
8982 
8983  inoutStats.size += size;
8984 
8985  VkDeviceSize lastOffset = 0;
8986 
8987  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8988  {
8989  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8990  size_t nextAlloc2ndIndex = 0; // Index into suballocations2nd, so it starts at 0.
8991  while(lastOffset < freeSpace2ndTo1stEnd)
8992  {
8993  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8994  while(nextAlloc2ndIndex < suballoc2ndCount &&
8995  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8996  {
8997  ++nextAlloc2ndIndex;
8998  }
8999 
9000  // Found non-null allocation.
9001  if(nextAlloc2ndIndex < suballoc2ndCount)
9002  {
9003  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9004 
9005  // 1. Process free space before this allocation.
9006  if(lastOffset < suballoc.offset)
9007  {
9008  // There is free space from lastOffset to suballoc.offset.
9009  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9010  inoutStats.unusedSize += unusedRangeSize;
9011  ++inoutStats.unusedRangeCount;
9012  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9013  }
9014 
9015  // 2. Process this allocation.
9016  // There is allocation with suballoc.offset, suballoc.size.
9017  ++inoutStats.allocationCount;
9018 
9019  // 3. Prepare for next iteration.
9020  lastOffset = suballoc.offset + suballoc.size;
9021  ++nextAlloc2ndIndex;
9022  }
9023  // We are at the end.
9024  else
9025  {
9026  if(lastOffset < freeSpace2ndTo1stEnd)
9027  {
9028  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9029  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9030  inoutStats.unusedSize += unusedRangeSize;
9031  ++inoutStats.unusedRangeCount;
9032  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9033  }
9034 
9035  // End of loop.
9036  lastOffset = freeSpace2ndTo1stEnd;
9037  }
9038  }
9039  }
9040 
9041  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9042  const VkDeviceSize freeSpace1stTo2ndEnd =
9043  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9044  while(lastOffset < freeSpace1stTo2ndEnd)
9045  {
9046  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9047  while(nextAlloc1stIndex < suballoc1stCount &&
9048  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9049  {
9050  ++nextAlloc1stIndex;
9051  }
9052 
9053  // Found non-null allocation.
9054  if(nextAlloc1stIndex < suballoc1stCount)
9055  {
9056  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9057 
9058  // 1. Process free space before this allocation.
9059  if(lastOffset < suballoc.offset)
9060  {
9061  // There is free space from lastOffset to suballoc.offset.
9062  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9063  inoutStats.unusedSize += unusedRangeSize;
9064  ++inoutStats.unusedRangeCount;
9065  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9066  }
9067 
9068  // 2. Process this allocation.
9069  // There is allocation with suballoc.offset, suballoc.size.
9070  ++inoutStats.allocationCount;
9071 
9072  // 3. Prepare for next iteration.
9073  lastOffset = suballoc.offset + suballoc.size;
9074  ++nextAlloc1stIndex;
9075  }
9076  // We are at the end.
9077  else
9078  {
9079  if(lastOffset < freeSpace1stTo2ndEnd)
9080  {
9081  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9082  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9083  inoutStats.unusedSize += unusedRangeSize;
9084  ++inoutStats.unusedRangeCount;
9085  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9086  }
9087 
9088  // End of loop.
9089  lastOffset = freeSpace1stTo2ndEnd;
9090  }
9091  }
9092 
9093  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9094  {
9095  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9096  while(lastOffset < size)
9097  {
9098  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9099  while(nextAlloc2ndIndex != SIZE_MAX &&
9100  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9101  {
9102  --nextAlloc2ndIndex;
9103  }
9104 
9105  // Found non-null allocation.
9106  if(nextAlloc2ndIndex != SIZE_MAX)
9107  {
9108  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9109 
9110  // 1. Process free space before this allocation.
9111  if(lastOffset < suballoc.offset)
9112  {
9113  // There is free space from lastOffset to suballoc.offset.
9114  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9115  inoutStats.unusedSize += unusedRangeSize;
9116  ++inoutStats.unusedRangeCount;
9117  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9118  }
9119 
9120  // 2. Process this allocation.
9121  // There is allocation with suballoc.offset, suballoc.size.
9122  ++inoutStats.allocationCount;
9123 
9124  // 3. Prepare for next iteration.
9125  lastOffset = suballoc.offset + suballoc.size;
9126  --nextAlloc2ndIndex;
9127  }
9128  // We are at the end.
9129  else
9130  {
9131  if(lastOffset < size)
9132  {
9133  // There is free space from lastOffset to size.
9134  const VkDeviceSize unusedRangeSize = size - lastOffset;
9135  inoutStats.unusedSize += unusedRangeSize;
9136  ++inoutStats.unusedRangeCount;
9137  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9138  }
9139 
9140  // End of loop.
9141  lastOffset = size;
9142  }
9143  }
9144  }
9145 }
9146 
9147 #if VMA_STATS_STRING_ENABLED
9148 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9149 {
9150  const VkDeviceSize size = GetSize();
9151  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9152  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9153  const size_t suballoc1stCount = suballocations1st.size();
9154  const size_t suballoc2ndCount = suballocations2nd.size();
9155 
9156  // FIRST PASS
9157 
9158  size_t unusedRangeCount = 0;
9159  VkDeviceSize usedBytes = 0;
9160 
9161  VkDeviceSize lastOffset = 0;
9162 
9163  size_t alloc2ndCount = 0;
9164  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9165  {
9166  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9167  size_t nextAlloc2ndIndex = 0;
9168  while(lastOffset < freeSpace2ndTo1stEnd)
9169  {
9170  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9171  while(nextAlloc2ndIndex < suballoc2ndCount &&
9172  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9173  {
9174  ++nextAlloc2ndIndex;
9175  }
9176 
9177  // Found non-null allocation.
9178  if(nextAlloc2ndIndex < suballoc2ndCount)
9179  {
9180  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9181 
9182  // 1. Process free space before this allocation.
9183  if(lastOffset < suballoc.offset)
9184  {
9185  // There is free space from lastOffset to suballoc.offset.
9186  ++unusedRangeCount;
9187  }
9188 
9189  // 2. Process this allocation.
9190  // There is allocation with suballoc.offset, suballoc.size.
9191  ++alloc2ndCount;
9192  usedBytes += suballoc.size;
9193 
9194  // 3. Prepare for next iteration.
9195  lastOffset = suballoc.offset + suballoc.size;
9196  ++nextAlloc2ndIndex;
9197  }
9198  // We are at the end.
9199  else
9200  {
9201  if(lastOffset < freeSpace2ndTo1stEnd)
9202  {
9203  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9204  ++unusedRangeCount;
9205  }
9206 
9207  // End of loop.
9208  lastOffset = freeSpace2ndTo1stEnd;
9209  }
9210  }
9211  }
9212 
9213  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9214  size_t alloc1stCount = 0;
9215  const VkDeviceSize freeSpace1stTo2ndEnd =
9216  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9217  while(lastOffset < freeSpace1stTo2ndEnd)
9218  {
9219  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9220  while(nextAlloc1stIndex < suballoc1stCount &&
9221  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9222  {
9223  ++nextAlloc1stIndex;
9224  }
9225 
9226  // Found non-null allocation.
9227  if(nextAlloc1stIndex < suballoc1stCount)
9228  {
9229  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9230 
9231  // 1. Process free space before this allocation.
9232  if(lastOffset < suballoc.offset)
9233  {
9234  // There is free space from lastOffset to suballoc.offset.
9235  ++unusedRangeCount;
9236  }
9237 
9238  // 2. Process this allocation.
9239  // There is allocation with suballoc.offset, suballoc.size.
9240  ++alloc1stCount;
9241  usedBytes += suballoc.size;
9242 
9243  // 3. Prepare for next iteration.
9244  lastOffset = suballoc.offset + suballoc.size;
9245  ++nextAlloc1stIndex;
9246  }
9247  // We are at the end.
9248  else
9249  {
9250  if(lastOffset < freeSpace1stTo2ndEnd)
9251  {
9252  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9253  ++unusedRangeCount;
9254  }
9255 
9256  // End of loop.
9257  lastOffset = freeSpace1stTo2ndEnd;
9258  }
9259  }
9260 
9261  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9262  {
9263  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9264  while(lastOffset < size)
9265  {
9266  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9267  while(nextAlloc2ndIndex != SIZE_MAX &&
9268  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9269  {
9270  --nextAlloc2ndIndex;
9271  }
9272 
9273  // Found non-null allocation.
9274  if(nextAlloc2ndIndex != SIZE_MAX)
9275  {
9276  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9277 
9278  // 1. Process free space before this allocation.
9279  if(lastOffset < suballoc.offset)
9280  {
9281  // There is free space from lastOffset to suballoc.offset.
9282  ++unusedRangeCount;
9283  }
9284 
9285  // 2. Process this allocation.
9286  // There is allocation with suballoc.offset, suballoc.size.
9287  ++alloc2ndCount;
9288  usedBytes += suballoc.size;
9289 
9290  // 3. Prepare for next iteration.
9291  lastOffset = suballoc.offset + suballoc.size;
9292  --nextAlloc2ndIndex;
9293  }
9294  // We are at the end.
9295  else
9296  {
9297  if(lastOffset < size)
9298  {
9299  // There is free space from lastOffset to size.
9300  ++unusedRangeCount;
9301  }
9302 
9303  // End of loop.
9304  lastOffset = size;
9305  }
9306  }
9307  }
9308 
9309  const VkDeviceSize unusedBytes = size - usedBytes;
9310  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9311 
9312  // SECOND PASS
9313  lastOffset = 0;
9314 
9315  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9316  {
9317  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9318  size_t nextAlloc2ndIndex = 0;
9319  while(lastOffset < freeSpace2ndTo1stEnd)
9320  {
9321  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9322  while(nextAlloc2ndIndex < suballoc2ndCount &&
9323  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9324  {
9325  ++nextAlloc2ndIndex;
9326  }
9327 
9328  // Found non-null allocation.
9329  if(nextAlloc2ndIndex < suballoc2ndCount)
9330  {
9331  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9332 
9333  // 1. Process free space before this allocation.
9334  if(lastOffset < suballoc.offset)
9335  {
9336  // There is free space from lastOffset to suballoc.offset.
9337  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9338  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9339  }
9340 
9341  // 2. Process this allocation.
9342  // There is allocation with suballoc.offset, suballoc.size.
9343  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9344 
9345  // 3. Prepare for next iteration.
9346  lastOffset = suballoc.offset + suballoc.size;
9347  ++nextAlloc2ndIndex;
9348  }
9349  // We are at the end.
9350  else
9351  {
9352  if(lastOffset < freeSpace2ndTo1stEnd)
9353  {
9354  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9355  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9356  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9357  }
9358 
9359  // End of loop.
9360  lastOffset = freeSpace2ndTo1stEnd;
9361  }
9362  }
9363  }
9364 
9365  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9366  while(lastOffset < freeSpace1stTo2ndEnd)
9367  {
9368  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9369  while(nextAlloc1stIndex < suballoc1stCount &&
9370  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9371  {
9372  ++nextAlloc1stIndex;
9373  }
9374 
9375  // Found non-null allocation.
9376  if(nextAlloc1stIndex < suballoc1stCount)
9377  {
9378  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9379 
9380  // 1. Process free space before this allocation.
9381  if(lastOffset < suballoc.offset)
9382  {
9383  // There is free space from lastOffset to suballoc.offset.
9384  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9385  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9386  }
9387 
9388  // 2. Process this allocation.
9389  // There is allocation with suballoc.offset, suballoc.size.
9390  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9391 
9392  // 3. Prepare for next iteration.
9393  lastOffset = suballoc.offset + suballoc.size;
9394  ++nextAlloc1stIndex;
9395  }
9396  // We are at the end.
9397  else
9398  {
9399  if(lastOffset < freeSpace1stTo2ndEnd)
9400  {
9401  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9402  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9403  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9404  }
9405 
9406  // End of loop.
9407  lastOffset = freeSpace1stTo2ndEnd;
9408  }
9409  }
9410 
9411  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9412  {
9413  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9414  while(lastOffset < size)
9415  {
9416  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
9417  while(nextAlloc2ndIndex != SIZE_MAX &&
9418  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9419  {
9420  --nextAlloc2ndIndex;
9421  }
9422 
9423  // Found non-null allocation.
9424  if(nextAlloc2ndIndex != SIZE_MAX)
9425  {
9426  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9427 
9428  // 1. Process free space before this allocation.
9429  if(lastOffset < suballoc.offset)
9430  {
9431  // There is free space from lastOffset to suballoc.offset.
9432  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9433  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9434  }
9435 
9436  // 2. Process this allocation.
9437  // There is allocation with suballoc.offset, suballoc.size.
9438  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9439 
9440  // 3. Prepare for next iteration.
9441  lastOffset = suballoc.offset + suballoc.size;
9442  --nextAlloc2ndIndex;
9443  }
9444  // We are at the end.
9445  else
9446  {
9447  if(lastOffset < size)
9448  {
9449  // There is free space from lastOffset to size.
9450  const VkDeviceSize unusedRangeSize = size - lastOffset;
9451  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9452  }
9453 
9454  // End of loop.
9455  lastOffset = size;
9456  }
9457  }
9458  }
9459 
9460  PrintDetailedMap_End(json);
9461 }
9462 #endif // #if VMA_STATS_STRING_ENABLED
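/*
A minimal sketch of how this JSON printer is reached from the public API, using
vmaBuildStatsString()/vmaFreeStatsString() declared earlier in this header
("allocator" is assumed to be a valid VmaAllocator):

\code
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailedMap == VK_TRUE
// Write statsString (JSON) to a log or file, then:
vmaFreeStatsString(allocator, statsString);
\endcode
*/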
9463 
9464 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9465  uint32_t currentFrameIndex,
9466  uint32_t frameInUseCount,
9467  VkDeviceSize bufferImageGranularity,
9468  VkDeviceSize allocSize,
9469  VkDeviceSize allocAlignment,
9470  bool upperAddress,
9471  VmaSuballocationType allocType,
9472  bool canMakeOtherLost,
9473  uint32_t strategy,
9474  VmaAllocationRequest* pAllocationRequest)
9475 {
9476  VMA_ASSERT(allocSize > 0);
9477  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9478  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9479  VMA_HEAVY_ASSERT(Validate());
9480 
9481  const VkDeviceSize size = GetSize();
9482  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9483  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9484 
9485  if(upperAddress)
9486  {
9487  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9488  {
9489  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9490  return false;
9491  }
9492 
9493  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9494  if(allocSize > size)
9495  {
9496  return false;
9497  }
9498  VkDeviceSize resultBaseOffset = size - allocSize;
9499  if(!suballocations2nd.empty())
9500  {
9501  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9502  if(allocSize > lastSuballoc.offset)
9503  {
9504  return false;
9505  }
9506  resultBaseOffset = lastSuballoc.offset - allocSize;
9507  }
9508 
9509  // Start from offset equal to end of free space.
9510  VkDeviceSize resultOffset = resultBaseOffset;
9511 
9512  // Apply VMA_DEBUG_MARGIN at the end.
9513  if(VMA_DEBUG_MARGIN > 0)
9514  {
9515  if(resultOffset < VMA_DEBUG_MARGIN)
9516  {
9517  return false;
9518  }
9519  resultOffset -= VMA_DEBUG_MARGIN;
9520  }
9521 
9522  // Apply alignment.
9523  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9524 
9525  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9526  // Make bigger alignment if necessary.
9527  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9528  {
9529  bool bufferImageGranularityConflict = false;
9530  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9531  {
9532  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9533  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9534  {
9535  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9536  {
9537  bufferImageGranularityConflict = true;
9538  break;
9539  }
9540  }
9541  else
9542  // Already on next page.
9543  break;
9544  }
9545  if(bufferImageGranularityConflict)
9546  {
9547  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9548  }
9549  }
9550 
9551  // There is enough free space.
9552  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9553  suballocations1st.back().offset + suballocations1st.back().size :
9554  0;
9555  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9556  {
9557  // Check previous suballocations for BufferImageGranularity conflicts.
9558  // If conflict exists, allocation cannot be made here.
9559  if(bufferImageGranularity > 1)
9560  {
9561  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9562  {
9563  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9564  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9565  {
9566  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9567  {
9568  return false;
9569  }
9570  }
9571  else
9572  {
9573  // Already on previous page.
9574  break;
9575  }
9576  }
9577  }
9578 
9579  // All tests passed: Success.
9580  pAllocationRequest->offset = resultOffset;
9581  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9582  pAllocationRequest->sumItemSize = 0;
9583  // pAllocationRequest->item unused.
9584  pAllocationRequest->itemsToMakeLostCount = 0;
9585  return true;
9586  }
9587  }
9588  else // !upperAddress
9589  {
9590  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9591  {
9592  // Try to allocate at the end of 1st vector.
9593 
9594  VkDeviceSize resultBaseOffset = 0;
9595  if(!suballocations1st.empty())
9596  {
9597  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9598  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9599  }
9600 
9601  // Start from offset equal to beginning of free space.
9602  VkDeviceSize resultOffset = resultBaseOffset;
9603 
9604  // Apply VMA_DEBUG_MARGIN at the beginning.
9605  if(VMA_DEBUG_MARGIN > 0)
9606  {
9607  resultOffset += VMA_DEBUG_MARGIN;
9608  }
9609 
9610  // Apply alignment.
9611  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9612 
9613  // Check previous suballocations for BufferImageGranularity conflicts.
9614  // Make bigger alignment if necessary.
9615  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9616  {
9617  bool bufferImageGranularityConflict = false;
9618  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9619  {
9620  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9621  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9622  {
9623  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9624  {
9625  bufferImageGranularityConflict = true;
9626  break;
9627  }
9628  }
9629  else
9630  // Already on previous page.
9631  break;
9632  }
9633  if(bufferImageGranularityConflict)
9634  {
9635  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9636  }
9637  }
9638 
9639  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9640  suballocations2nd.back().offset : size;
9641 
9642  // There is enough free space at the end after alignment.
9643  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9644  {
9645  // Check next suballocations for BufferImageGranularity conflicts.
9646  // If conflict exists, allocation cannot be made here.
9647  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9648  {
9649  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9650  {
9651  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9652  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9653  {
9654  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9655  {
9656  return false;
9657  }
9658  }
9659  else
9660  {
9661  // Already on next page.
9662  break;
9663  }
9664  }
9665  }
9666 
9667  // All tests passed: Success.
9668  pAllocationRequest->offset = resultOffset;
9669  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9670  pAllocationRequest->sumItemSize = 0;
9671  // pAllocationRequest->item unused.
9672  pAllocationRequest->itemsToMakeLostCount = 0;
9673  return true;
9674  }
9675  }
9676 
9677  // Wrap around: try to allocate at the end of the 2nd vector, treating the
9678  // beginning of the 1st vector as the end of the free space.
9679  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9680  {
9681  VMA_ASSERT(!suballocations1st.empty());
9682 
9683  VkDeviceSize resultBaseOffset = 0;
9684  if(!suballocations2nd.empty())
9685  {
9686  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9687  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9688  }
9689 
9690  // Start from offset equal to beginning of free space.
9691  VkDeviceSize resultOffset = resultBaseOffset;
9692 
9693  // Apply VMA_DEBUG_MARGIN at the beginning.
9694  if(VMA_DEBUG_MARGIN > 0)
9695  {
9696  resultOffset += VMA_DEBUG_MARGIN;
9697  }
9698 
9699  // Apply alignment.
9700  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9701 
9702  // Check previous suballocations for BufferImageGranularity conflicts.
9703  // Make bigger alignment if necessary.
9704  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9705  {
9706  bool bufferImageGranularityConflict = false;
9707  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9708  {
9709  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9710  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9711  {
9712  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9713  {
9714  bufferImageGranularityConflict = true;
9715  break;
9716  }
9717  }
9718  else
9719  // Already on previous page.
9720  break;
9721  }
9722  if(bufferImageGranularityConflict)
9723  {
9724  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9725  }
9726  }
9727 
9728  pAllocationRequest->itemsToMakeLostCount = 0;
9729  pAllocationRequest->sumItemSize = 0;
9730  size_t index1st = m_1stNullItemsBeginCount;
9731 
9732  if(canMakeOtherLost)
9733  {
9734  while(index1st < suballocations1st.size() &&
9735  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
9736  {
9737  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9738  const VmaSuballocation& suballoc = suballocations1st[index1st];
9739  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9740  {
9741  // No problem.
9742  }
9743  else
9744  {
9745  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9746  if(suballoc.hAllocation->CanBecomeLost() &&
9747  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9748  {
9749  ++pAllocationRequest->itemsToMakeLostCount;
9750  pAllocationRequest->sumItemSize += suballoc.size;
9751  }
9752  else
9753  {
9754  return false;
9755  }
9756  }
9757  ++index1st;
9758  }
9759 
9760  // Check next suballocations for BufferImageGranularity conflicts.
9761  // If conflict exists, we must mark more allocations lost or fail.
9762  if(bufferImageGranularity > 1)
9763  {
9764  while(index1st < suballocations1st.size())
9765  {
9766  const VmaSuballocation& suballoc = suballocations1st[index1st];
9767  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9768  {
9769  if(suballoc.hAllocation != VK_NULL_HANDLE)
9770  {
9771  // Conservative: treats any allocation on the same page as a conflict, without checking VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9772  if(suballoc.hAllocation->CanBecomeLost() &&
9773  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9774  {
9775  ++pAllocationRequest->itemsToMakeLostCount;
9776  pAllocationRequest->sumItemSize += suballoc.size;
9777  }
9778  else
9779  {
9780  return false;
9781  }
9782  }
9783  }
9784  else
9785  {
9786  // Already on next page.
9787  break;
9788  }
9789  ++index1st;
9790  }
9791  }
9792  }
9793 
9794  // There is enough free space at the end after alignment.
9795  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9796  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9797  {
9798  // Check next suballocations for BufferImageGranularity conflicts.
9799  // If conflict exists, allocation cannot be made here.
9800  if(bufferImageGranularity > 1)
9801  {
9802  for(size_t nextSuballocIndex = index1st;
9803  nextSuballocIndex < suballocations1st.size();
9804  nextSuballocIndex++)
9805  {
9806  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9807  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9808  {
9809  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9810  {
9811  return false;
9812  }
9813  }
9814  else
9815  {
9816  // Already on next page.
9817  break;
9818  }
9819  }
9820  }
9821 
9822  // All tests passed: Success.
9823  pAllocationRequest->offset = resultOffset;
9824  pAllocationRequest->sumFreeSize =
9825  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9826  - resultBaseOffset
9827  - pAllocationRequest->sumItemSize;
9828  // pAllocationRequest->item unused.
9829  return true;
9830  }
9831  }
9832  }
9833 
9834  return false;
9835 }
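/*
How the upperAddress path of CreateAllocationRequest() is reached from the
public API - a minimal sketch, assuming "allocator", "bufCreateInfo" and a pool
"myLinearPool" created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = myLinearPool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // upperAddress == true

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
\endcode

Note the asserts above: one pool cannot be used as a ring buffer and as a
double stack at the same time.
*/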
9836 
9837 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9838  uint32_t currentFrameIndex,
9839  uint32_t frameInUseCount,
9840  VmaAllocationRequest* pAllocationRequest)
9841 {
9842  if(pAllocationRequest->itemsToMakeLostCount == 0)
9843  {
9844  return true;
9845  }
9846 
9847  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9848 
9849  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9850  size_t index1st = m_1stNullItemsBeginCount;
9851  size_t madeLostCount = 0;
9852  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9853  {
9854  VMA_ASSERT(index1st < suballocations1st.size());
9855  VmaSuballocation& suballoc = suballocations1st[index1st];
9856  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9857  {
9858  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9859  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9860  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9861  {
9862  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9863  suballoc.hAllocation = VK_NULL_HANDLE;
9864  m_SumFreeSize += suballoc.size;
9865  ++m_1stNullItemsMiddleCount;
9866  ++madeLostCount;
9867  }
9868  else
9869  {
9870  return false;
9871  }
9872  }
9873  ++index1st;
9874  }
9875 
9876  CleanupAfterFree();
9877  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9878 
9879  return true;
9880 }
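/*
Application-side protocol behind the "lost allocation" machinery used above -
a sketch assuming an allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (see the lost-allocations
documentation at the top of this file):

\code
vmaSetCurrentFrameIndex(allocator, frameIndex); // once per frame
if(vmaTouchAllocation(allocator, alloc))
{
    // alloc is alive; its last-use frame index is now frameIndex.
}
else
{
    // alloc was made lost (e.g. by MakeRequestedAllocationsLost()); recreate it.
}
\endcode
*/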
9881 
9882 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9883 {
9884  uint32_t lostAllocationCount = 0;
9885 
9886  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9887  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9888  {
9889  VmaSuballocation& suballoc = suballocations1st[i];
9890  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9891  suballoc.hAllocation->CanBecomeLost() &&
9892  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9893  {
9894  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9895  suballoc.hAllocation = VK_NULL_HANDLE;
9896  ++m_1stNullItemsMiddleCount;
9897  m_SumFreeSize += suballoc.size;
9898  ++lostAllocationCount;
9899  }
9900  }
9901 
9902  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9903  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9904  {
9905  VmaSuballocation& suballoc = suballocations2nd[i];
9906  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9907  suballoc.hAllocation->CanBecomeLost() &&
9908  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9909  {
9910  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9911  suballoc.hAllocation = VK_NULL_HANDLE;
9912  ++m_2ndNullItemsCount;
9913  ++lostAllocationCount;
9914  }
9915  }
9916 
9917  if(lostAllocationCount)
9918  {
9919  CleanupAfterFree();
9920  }
9921 
9922  return lostAllocationCount;
9923 }
9924 
9925 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9926 {
9927  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9928  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9929  {
9930  const VmaSuballocation& suballoc = suballocations1st[i];
9931  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9932  {
9933  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9934  {
9935  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9936  return VK_ERROR_VALIDATION_FAILED_EXT;
9937  }
9938  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9939  {
9940  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9941  return VK_ERROR_VALIDATION_FAILED_EXT;
9942  }
9943  }
9944  }
9945 
9946  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9947  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9948  {
9949  const VmaSuballocation& suballoc = suballocations2nd[i];
9950  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9951  {
9952  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9953  {
9954  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9955  return VK_ERROR_VALIDATION_FAILED_EXT;
9956  }
9957  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9958  {
9959  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9960  return VK_ERROR_VALIDATION_FAILED_EXT;
9961  }
9962  }
9963  }
9964 
9965  return VK_SUCCESS;
9966 }
9967 
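// Layout reminder for the two suballocation vectors (illustrative sketch):
//   Ring buffer:   | 2nd vector |####| 1st vector --->        |
//   Double stack:  | 1st vector --->          <--- 2nd vector |
// In ring-buffer mode, 2nd holds allocations that wrapped around and therefore
// lie at offsets below the first item of 1st. In double-stack mode, 2nd grows
// down from the end of the block (upperAddress allocations).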
9968 void VmaBlockMetadata_Linear::Alloc(
9969  const VmaAllocationRequest& request,
9970  VmaSuballocationType type,
9971  VkDeviceSize allocSize,
9972  bool upperAddress,
9973  VmaAllocation hAllocation)
9974 {
9975  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9976 
9977  if(upperAddress)
9978  {
9979  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9980  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9981  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9982  suballocations2nd.push_back(newSuballoc);
9983  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9984  }
9985  else
9986  {
9987  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9988 
9989  // First allocation.
9990  if(suballocations1st.empty())
9991  {
9992  suballocations1st.push_back(newSuballoc);
9993  }
9994  else
9995  {
9996  // New allocation at the end of 1st vector.
9997  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9998  {
9999  // Check if it fits before the end of the block.
10000  VMA_ASSERT(request.offset + allocSize <= GetSize());
10001  suballocations1st.push_back(newSuballoc);
10002  }
10003  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10004  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
10005  {
10006  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10007 
10008  switch(m_2ndVectorMode)
10009  {
10010  case SECOND_VECTOR_EMPTY:
10011  // First allocation from second part ring buffer.
10012  VMA_ASSERT(suballocations2nd.empty());
10013  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10014  break;
10015  case SECOND_VECTOR_RING_BUFFER:
10016  // 2-part ring buffer is already started.
10017  VMA_ASSERT(!suballocations2nd.empty());
10018  break;
10019  case SECOND_VECTOR_DOUBLE_STACK:
10020  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10021  break;
10022  default:
10023  VMA_ASSERT(0);
10024  }
10025 
10026  suballocations2nd.push_back(newSuballoc);
10027  }
10028  else
10029  {
10030  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10031  }
10032  }
10033  }
10034 
10035  m_SumFreeSize -= newSuballoc.size;
10036 }
10037 
10038 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10039 {
10040  FreeAtOffset(allocation->GetOffset());
10041 }
10042 
10043 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10044 {
10045  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10046  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10047 
10048  if(!suballocations1st.empty())
10049  {
10050  // First allocation: Mark it as next empty at the beginning.
10051  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10052  if(firstSuballoc.offset == offset)
10053  {
10054  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10055  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10056  m_SumFreeSize += firstSuballoc.size;
10057  ++m_1stNullItemsBeginCount;
10058  CleanupAfterFree();
10059  return;
10060  }
10061  }
10062 
10063  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10064  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10065  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10066  {
10067  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10068  if(lastSuballoc.offset == offset)
10069  {
10070  m_SumFreeSize += lastSuballoc.size;
10071  suballocations2nd.pop_back();
10072  CleanupAfterFree();
10073  return;
10074  }
10075  }
10076  // Last allocation in 1st vector.
10077  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10078  {
10079  VmaSuballocation& lastSuballoc = suballocations1st.back();
10080  if(lastSuballoc.offset == offset)
10081  {
10082  m_SumFreeSize += lastSuballoc.size;
10083  suballocations1st.pop_back();
10084  CleanupAfterFree();
10085  return;
10086  }
10087  }
10088 
10089  // Item from the middle of 1st vector.
10090  {
10091  VmaSuballocation refSuballoc;
10092  refSuballoc.offset = offset;
10093  // The rest of the members intentionally stays uninitialized for better performance.
10094  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10095  suballocations1st.begin() + m_1stNullItemsBeginCount,
10096  suballocations1st.end(),
10097  refSuballoc);
10098  if(it != suballocations1st.end())
10099  {
10100  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10101  it->hAllocation = VK_NULL_HANDLE;
10102  ++m_1stNullItemsMiddleCount;
10103  m_SumFreeSize += it->size;
10104  CleanupAfterFree();
10105  return;
10106  }
10107  }
10108 
10109  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10110  {
10111  // Item from the middle of 2nd vector.
10112  VmaSuballocation refSuballoc;
10113  refSuballoc.offset = offset;
10114  // The rest of the members intentionally stays uninitialized for better performance.
10115  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10116  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10117  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10118  if(it != suballocations2nd.end())
10119  {
10120  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10121  it->hAllocation = VK_NULL_HANDLE;
10122  ++m_2ndNullItemsCount;
10123  m_SumFreeSize += it->size;
10124  CleanupAfterFree();
10125  return;
10126  }
10127  }
10128 
10129  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10130 }
10131 
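// Compaction heuristic: only compact when the 1st vector is large (more than 32
// items) and null (freed) items make up at least 60% of it, i.e. 2*null >= 3*live.
// E.g. 40 null + 24 live items: 80 >= 72 and 64 > 32, so compaction triggers.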
10132 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10133 {
10134  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10135  const size_t suballocCount = AccessSuballocations1st().size();
10136  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10137 }
10138 
10139 void VmaBlockMetadata_Linear::CleanupAfterFree()
10140 {
10141  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10142  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10143 
10144  if(IsEmpty())
10145  {
10146  suballocations1st.clear();
10147  suballocations2nd.clear();
10148  m_1stNullItemsBeginCount = 0;
10149  m_1stNullItemsMiddleCount = 0;
10150  m_2ndNullItemsCount = 0;
10151  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10152  }
10153  else
10154  {
10155  const size_t suballoc1stCount = suballocations1st.size();
10156  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10157  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10158 
10159  // Find more null items at the beginning of 1st vector.
10160  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10161  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10162  {
10163  ++m_1stNullItemsBeginCount;
10164  --m_1stNullItemsMiddleCount;
10165  }
10166 
10167  // Find more null items at the end of 1st vector.
10168  while(m_1stNullItemsMiddleCount > 0 &&
10169  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10170  {
10171  --m_1stNullItemsMiddleCount;
10172  suballocations1st.pop_back();
10173  }
10174 
10175  // Find more null items at the end of 2nd vector.
10176  while(m_2ndNullItemsCount > 0 &&
10177  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10178  {
10179  --m_2ndNullItemsCount;
10180  suballocations2nd.pop_back();
10181  }
10182 
10183  if(ShouldCompact1st())
10184  {
10185  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10186  size_t srcIndex = m_1stNullItemsBeginCount;
10187  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10188  {
10189  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10190  {
10191  ++srcIndex;
10192  }
10193  if(dstIndex != srcIndex)
10194  {
10195  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10196  }
10197  ++srcIndex;
10198  }
10199  suballocations1st.resize(nonNullItemCount);
10200  m_1stNullItemsBeginCount = 0;
10201  m_1stNullItemsMiddleCount = 0;
10202  }
10203 
10204  // 2nd vector became empty.
10205  if(suballocations2nd.empty())
10206  {
10207  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10208  }
10209 
10210  // 1st vector became empty.
10211  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10212  {
10213  suballocations1st.clear();
10214  m_1stNullItemsBeginCount = 0;
10215 
10216  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10217  {
10218  // Swap 1st with 2nd. Now 2nd is empty.
10219  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10220  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10221  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10222  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10223  {
10224  ++m_1stNullItemsBeginCount;
10225  --m_1stNullItemsMiddleCount;
10226  }
10227  m_2ndNullItemsCount = 0;
10228  m_1stVectorIndex ^= 1;
10229  }
10230  }
10231  }
10232 
10233  VMA_HEAVY_ASSERT(Validate());
10234 }
10235 
10236 
10237 ////////////////////////////////////////////////////////////////////////////////
10238 // class VmaBlockMetadata_Buddy
10239 
10240 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10241  VmaBlockMetadata(hAllocator),
10242  m_Root(VMA_NULL),
10243  m_AllocationCount(0),
10244  m_FreeCount(1),
10245  m_SumFreeSize(0)
10246 {
10247  memset(m_FreeList, 0, sizeof(m_FreeList));
10248 }
10249 
10250 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10251 {
10252  DeleteNode(m_Root);
10253 }
10254 
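// The buddy algorithm needs power-of-2 node sizes, so Init() rounds the usable
// size down to the previous power of 2; e.g. a 12 MiB block has an 8 MiB usable
// size and the remaining 4 MiB is reported via GetUnusableSize(). Level 0 is the
// whole usable block and each deeper level halves the node size.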
10255 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10256 {
10257  VmaBlockMetadata::Init(size);
10258 
10259  m_UsableSize = VmaPrevPow2(size);
10260  m_SumFreeSize = m_UsableSize;
10261 
10262  // Calculate m_LevelCount.
10263  m_LevelCount = 1;
10264  while(m_LevelCount < MAX_LEVELS &&
10265  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10266  {
10267  ++m_LevelCount;
10268  }
10269 
10270  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10271  rootNode->offset = 0;
10272  rootNode->type = Node::TYPE_FREE;
10273  rootNode->parent = VMA_NULL;
10274  rootNode->buddy = VMA_NULL;
10275 
10276  m_Root = rootNode;
10277  AddToFreeListFront(0, rootNode);
10278 }
10279 
10280 bool VmaBlockMetadata_Buddy::Validate() const
10281 {
10282  // Validate tree.
10283  ValidationContext ctx;
10284  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10285  {
10286  VMA_VALIDATE(false && "ValidateNode failed.");
10287  }
10288  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10289  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10290 
10291  // Validate free node lists.
10292  for(uint32_t level = 0; level < m_LevelCount; ++level)
10293  {
10294  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10295  m_FreeList[level].front->free.prev == VMA_NULL);
10296 
10297  for(Node* node = m_FreeList[level].front;
10298  node != VMA_NULL;
10299  node = node->free.next)
10300  {
10301  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10302 
10303  if(node->free.next == VMA_NULL)
10304  {
10305  VMA_VALIDATE(m_FreeList[level].back == node);
10306  }
10307  else
10308  {
10309  VMA_VALIDATE(node->free.next->free.prev == node);
10310  }
10311  }
10312  }
10313 
10314  // Validate that free lists at higher levels are empty.
10315  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10316  {
10317  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10318  }
10319 
10320  return true;
10321 }
10322 
10323 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10324 {
10325  for(uint32_t level = 0; level < m_LevelCount; ++level)
10326  {
10327  if(m_FreeList[level].front != VMA_NULL)
10328  {
10329  return LevelToNodeSize(level);
10330  }
10331  }
10332  return 0;
10333 }
10334 
10335 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10336 {
10337  const VkDeviceSize unusableSize = GetUnusableSize();
10338 
10339  outInfo.blockCount = 1;
10340 
10341  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10342  outInfo.usedBytes = outInfo.unusedBytes = 0;
10343 
10344  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10345  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10346  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10347 
10348  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10349 
10350  if(unusableSize > 0)
10351  {
10352  ++outInfo.unusedRangeCount;
10353  outInfo.unusedBytes += unusableSize;
10354  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10355  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10356  }
10357 }
10358 
10359 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10360 {
10361  const VkDeviceSize unusableSize = GetUnusableSize();
10362 
10363  inoutStats.size += GetSize();
10364  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10365  inoutStats.allocationCount += m_AllocationCount;
10366  inoutStats.unusedRangeCount += m_FreeCount;
10367  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10368 
10369  if(unusableSize > 0)
10370  {
10371  ++inoutStats.unusedRangeCount;
10372  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10373  }
10374 }
10375 
10376 #if VMA_STATS_STRING_ENABLED
10377 
10378 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10379 {
10380  // TODO optimize
10381  VmaStatInfo stat;
10382  CalcAllocationStatInfo(stat);
10383 
10384  PrintDetailedMap_Begin(
10385  json,
10386  stat.unusedBytes,
10387  stat.allocationCount,
10388  stat.unusedRangeCount);
10389 
10390  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10391 
10392  const VkDeviceSize unusableSize = GetUnusableSize();
10393  if(unusableSize > 0)
10394  {
10395  PrintDetailedMap_UnusedRange(json,
10396  m_UsableSize, // offset
10397  unusableSize); // size
10398  }
10399 
10400  PrintDetailedMap_End(json);
10401 }
10402 
10403 #endif // #if VMA_STATS_STRING_ENABLED
10404 
10405 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10406  uint32_t currentFrameIndex,
10407  uint32_t frameInUseCount,
10408  VkDeviceSize bufferImageGranularity,
10409  VkDeviceSize allocSize,
10410  VkDeviceSize allocAlignment,
10411  bool upperAddress,
10412  VmaSuballocationType allocType,
10413  bool canMakeOtherLost,
10414  uint32_t strategy,
10415  VmaAllocationRequest* pAllocationRequest)
10416 {
10417  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10418 
10419  // Simple way to respect bufferImageGranularity. May be optimized some day.
10420  // Whenever the allocation might be an OPTIMAL image, raise alignment and size to at least the granularity:
10421  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10422  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10423  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10424  {
10425  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10426  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10427  }
10428 
10429  if(allocSize > m_UsableSize)
10430  {
10431  return false;
10432  }
10433 
10434  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10435  for(uint32_t level = targetLevel + 1; level--; )
10436  {
10437  for(Node* freeNode = m_FreeList[level].front;
10438  freeNode != VMA_NULL;
10439  freeNode = freeNode->free.next)
10440  {
10441  if(freeNode->offset % allocAlignment == 0)
10442  {
10443  pAllocationRequest->offset = freeNode->offset;
10444  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10445  pAllocationRequest->sumItemSize = 0;
10446  pAllocationRequest->itemsToMakeLostCount = 0;
10447  pAllocationRequest->customData = (void*)(uintptr_t)level;
10448  return true;
10449  }
10450  }
10451  }
10452 
10453  return false;
10454 }
10455 
10456 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10457  uint32_t currentFrameIndex,
10458  uint32_t frameInUseCount,
10459  VmaAllocationRequest* pAllocationRequest)
10460 {
10461  /*
10462  Lost allocations are not supported in buddy allocator at the moment.
10463  Support might be added in the future.
10464  */
10465  return pAllocationRequest->itemsToMakeLostCount == 0;
10466 }
10467 
10468 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10469 {
10470  /*
10471  Lost allocations are not supported in buddy allocator at the moment.
10472  Support might be added in the future.
10473  */
10474  return 0;
10475 }
10476 
10477 void VmaBlockMetadata_Buddy::Alloc(
10478  const VmaAllocationRequest& request,
10479  VmaSuballocationType type,
10480  VkDeviceSize allocSize,
10481  bool upperAddress,
10482  VmaAllocation hAllocation)
10483 {
10484  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10485  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10486 
10487  Node* currNode = m_FreeList[currLevel].front;
10488  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10489  while(currNode->offset != request.offset)
10490  {
10491  currNode = currNode->free.next;
10492  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10493  }
10494 
10495  // Go down, splitting free nodes.
10496  while(currLevel < targetLevel)
10497  {
10498  // currNode is already first free node at currLevel.
10499  // Remove it from list of free nodes at this currLevel.
10500  RemoveFromFreeList(currLevel, currNode);
10501 
10502  const uint32_t childrenLevel = currLevel + 1;
10503 
10504  // Create two free sub-nodes.
10505  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10506  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10507 
10508  leftChild->offset = currNode->offset;
10509  leftChild->type = Node::TYPE_FREE;
10510  leftChild->parent = currNode;
10511  leftChild->buddy = rightChild;
10512 
10513  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10514  rightChild->type = Node::TYPE_FREE;
10515  rightChild->parent = currNode;
10516  rightChild->buddy = leftChild;
10517 
10518  // Convert current currNode to split type.
10519  currNode->type = Node::TYPE_SPLIT;
10520  currNode->split.leftChild = leftChild;
10521 
10522  // Add child nodes to free list. Order is important!
10523  AddToFreeListFront(childrenLevel, rightChild);
10524  AddToFreeListFront(childrenLevel, leftChild);
10525 
10526  ++m_FreeCount;
10527  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10528  ++currLevel;
10529  currNode = m_FreeList[currLevel].front;
10530 
10531  /*
10532  We can be sure that currNode, as left child of node previously split,
10533  also fulfills the alignment requirement.
10534  */
10535  }
10536 
10537  // Remove from free list.
10538  VMA_ASSERT(currLevel == targetLevel &&
10539  currNode != VMA_NULL &&
10540  currNode->type == Node::TYPE_FREE);
10541  RemoveFromFreeList(currLevel, currNode);
10542 
10543  // Convert to allocation node.
10544  currNode->type = Node::TYPE_ALLOCATION;
10545  currNode->allocation.alloc = hAllocation;
10546 
10547  ++m_AllocationCount;
10548  --m_FreeCount;
10549  m_SumFreeSize -= allocSize;
10550 }
10551 
10552 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10553 {
10554  if(node->type == Node::TYPE_SPLIT)
10555  {
10556  DeleteNode(node->split.leftChild->buddy);
10557  DeleteNode(node->split.leftChild);
10558  }
10559 
10560  vma_delete(GetAllocationCallbacks(), node);
10561 }
10562 
10563 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10564 {
10565  VMA_VALIDATE(level < m_LevelCount);
10566  VMA_VALIDATE(curr->parent == parent);
10567  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10568  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10569  switch(curr->type)
10570  {
10571  case Node::TYPE_FREE:
10572  // curr->free.prev, next are validated separately.
10573  ctx.calculatedSumFreeSize += levelNodeSize;
10574  ++ctx.calculatedFreeCount;
10575  break;
10576  case Node::TYPE_ALLOCATION:
10577  ++ctx.calculatedAllocationCount;
10578  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10579  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10580  break;
10581  case Node::TYPE_SPLIT:
10582  {
10583  const uint32_t childrenLevel = level + 1;
10584  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10585  const Node* const leftChild = curr->split.leftChild;
10586  VMA_VALIDATE(leftChild != VMA_NULL);
10587  VMA_VALIDATE(leftChild->offset == curr->offset);
10588  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10589  {
10590  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10591  }
10592  const Node* const rightChild = leftChild->buddy;
10593  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10594  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10595  {
10596  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10597  }
10598  }
10599  break;
10600  default:
10601  return false;
10602  }
10603 
10604  return true;
10605 }
10606 
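// Returns the deepest level whose node size still fits allocSize.
// E.g. with m_UsableSize = 256: for allocSize = 40 the loop walks 256 -> 128 -> 64
// (40 <= 128 and 40 <= 64, but 40 > 32), so the result is level 2, node size 64.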
10607 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10608 {
10609  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10610  uint32_t level = 0;
10611  VkDeviceSize currLevelNodeSize = m_UsableSize;
10612  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10613  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10614  {
10615  ++level;
10616  currLevelNodeSize = nextLevelNodeSize;
10617  nextLevelNodeSize = currLevelNodeSize >> 1;
10618  }
10619  return level;
10620 }
10621 
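// Descends from the root, choosing the left or right child depending on whether
// offset falls in the lower or upper half of the current node, until the
// allocation node is reached. Afterwards, while the node's buddy is also free,
// the pair is merged into their parent, moving one level up per merge.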
10622 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10623 {
10624  // Find node and level.
10625  Node* node = m_Root;
10626  VkDeviceSize nodeOffset = 0;
10627  uint32_t level = 0;
10628  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10629  while(node->type == Node::TYPE_SPLIT)
10630  {
10631  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10632  if(offset < nodeOffset + nextLevelSize)
10633  {
10634  node = node->split.leftChild;
10635  }
10636  else
10637  {
10638  node = node->split.leftChild->buddy;
10639  nodeOffset += nextLevelSize;
10640  }
10641  ++level;
10642  levelNodeSize = nextLevelSize;
10643  }
10644 
10645  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10646  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10647 
10648  ++m_FreeCount;
10649  --m_AllocationCount;
10650  m_SumFreeSize += alloc->GetSize();
10651 
10652  node->type = Node::TYPE_FREE;
10653 
10654  // Join free nodes if possible.
10655  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10656  {
10657  RemoveFromFreeList(level, node->buddy);
10658  Node* const parent = node->parent;
10659 
10660  vma_delete(GetAllocationCallbacks(), node->buddy);
10661  vma_delete(GetAllocationCallbacks(), node);
10662  parent->type = Node::TYPE_FREE;
10663 
10664  node = parent;
10665  --level;
10666  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10667  --m_FreeCount;
10668  }
10669 
10670  AddToFreeListFront(level, node);
10671 }
10672 
10673 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10674 {
10675  switch(node->type)
10676  {
10677  case Node::TYPE_FREE:
10678  ++outInfo.unusedRangeCount;
10679  outInfo.unusedBytes += levelNodeSize;
10680  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10681  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
10682  break;
10683  case Node::TYPE_ALLOCATION:
10684  {
10685  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10686  ++outInfo.allocationCount;
10687  outInfo.usedBytes += allocSize;
10688  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10689  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
10690 
10691  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10692  if(unusedRangeSize > 0)
10693  {
10694  ++outInfo.unusedRangeCount;
10695  outInfo.unusedBytes += unusedRangeSize;
10696  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10697  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10698  }
10699  }
10700  break;
10701  case Node::TYPE_SPLIT:
10702  {
10703  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10704  const Node* const leftChild = node->split.leftChild;
10705  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10706  const Node* const rightChild = leftChild->buddy;
10707  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10708  }
10709  break;
10710  default:
10711  VMA_ASSERT(0);
10712  }
10713 }
10714 
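// m_FreeList[level] is an intrusive doubly-linked list threaded through
// Node::free.prev/next. Free nodes are pushed at the front, so the scan in
// CreateAllocationRequest visits the most recently freed nodes first.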
10715 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10716 {
10717  VMA_ASSERT(node->type == Node::TYPE_FREE);
10718 
10719  // List is empty.
10720  Node* const frontNode = m_FreeList[level].front;
10721  if(frontNode == VMA_NULL)
10722  {
10723  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10724  node->free.prev = node->free.next = VMA_NULL;
10725  m_FreeList[level].front = m_FreeList[level].back = node;
10726  }
10727  else
10728  {
10729  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10730  node->free.prev = VMA_NULL;
10731  node->free.next = frontNode;
10732  frontNode->free.prev = node;
10733  m_FreeList[level].front = node;
10734  }
10735 }
10736 
10737 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10738 {
10739  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10740 
10741  // It is at the front.
10742  if(node->free.prev == VMA_NULL)
10743  {
10744  VMA_ASSERT(m_FreeList[level].front == node);
10745  m_FreeList[level].front = node->free.next;
10746  }
10747  else
10748  {
10749  Node* const prevFreeNode = node->free.prev;
10750  VMA_ASSERT(prevFreeNode->free.next == node);
10751  prevFreeNode->free.next = node->free.next;
10752  }
10753 
10754  // It is at the back.
10755  if(node->free.next == VMA_NULL)
10756  {
10757  VMA_ASSERT(m_FreeList[level].back == node);
10758  m_FreeList[level].back = node->free.prev;
10759  }
10760  else
10761  {
10762  Node* const nextFreeNode = node->free.next;
10763  VMA_ASSERT(nextFreeNode->free.prev == node);
10764  nextFreeNode->free.prev = node->free.prev;
10765  }
10766 }
10767 
10768 #if VMA_STATS_STRING_ENABLED
10769 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10770 {
10771  switch(node->type)
10772  {
10773  case Node::TYPE_FREE:
10774  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10775  break;
10776  case Node::TYPE_ALLOCATION:
10777  {
10778  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10779  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10780  if(allocSize < levelNodeSize)
10781  {
10782  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10783  }
10784  }
10785  break;
10786  case Node::TYPE_SPLIT:
10787  {
10788  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10789  const Node* const leftChild = node->split.leftChild;
10790  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10791  const Node* const rightChild = leftChild->buddy;
10792  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10793  }
10794  break;
10795  default:
10796  VMA_ASSERT(0);
10797  }
10798 }
10799 #endif // #if VMA_STATS_STRING_ENABLED
10800 
10801 
10802 ////////////////////////////////////////////////////////////////////////////////
10803 // class VmaDeviceMemoryBlock
10804 
10805 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10806  m_pMetadata(VMA_NULL),
10807  m_MemoryTypeIndex(UINT32_MAX),
10808  m_Id(0),
10809  m_hMemory(VK_NULL_HANDLE),
10810  m_MapCount(0),
10811  m_pMappedData(VMA_NULL)
10812 {
10813 }
10814 
10815 void VmaDeviceMemoryBlock::Init(
10816  VmaAllocator hAllocator,
10817  uint32_t newMemoryTypeIndex,
10818  VkDeviceMemory newMemory,
10819  VkDeviceSize newSize,
10820  uint32_t id,
10821  uint32_t algorithm)
10822 {
10823  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10824 
10825  m_MemoryTypeIndex = newMemoryTypeIndex;
10826  m_Id = id;
10827  m_hMemory = newMemory;
10828 
10829  switch(algorithm)
10830  {
10831  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10832  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10833  break;
10834  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10835  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10836  break;
10837  default:
10838  VMA_ASSERT(0);
10839  // Fall-through.
10840  case 0:
10841  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10842  }
10843  m_pMetadata->Init(newSize);
10844 }
10845 
10846 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10847 {
10848  // This is the most important assert in the entire library.
10849  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10850  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10851 
10852  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10853  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10854  m_hMemory = VK_NULL_HANDLE;
10855 
10856  vma_delete(allocator, m_pMetadata);
10857  m_pMetadata = VMA_NULL;
10858 }
10859 
10860 bool VmaDeviceMemoryBlock::Validate() const
10861 {
10862  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10863  (m_pMetadata->GetSize() != 0));
10864 
10865  return m_pMetadata->Validate();
10866 }
10867 
10868 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10869 {
10870  void* pData = nullptr;
10871  VkResult res = Map(hAllocator, 1, &pData);
10872  if(res != VK_SUCCESS)
10873  {
10874  return res;
10875  }
10876 
10877  res = m_pMetadata->CheckCorruption(pData);
10878 
10879  Unmap(hAllocator, 1);
10880 
10881  return res;
10882 }
10883 
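// Map/Unmap are reference-counted: vkMapMemory is called only when m_MapCount
// goes 0 -> 1 and vkUnmapMemory only when it returns to 0, so persistent mappings
// and temporary ones share a single mapping of the VkDeviceMemory.
// Minimal usage sketch (hypothetical 'block' and 'hAllocator' handles):
//   void* p1; block->Map(hAllocator, 1, &p1);  // maps, m_MapCount == 1
//   void* p2; block->Map(hAllocator, 1, &p2);  // reuses mapping, p2 == p1
//   block->Unmap(hAllocator, 1);               // m_MapCount back to 1
//   block->Unmap(hAllocator, 1);               // calls vkUnmapMemory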
10884 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10885 {
10886  if(count == 0)
10887  {
10888  return VK_SUCCESS;
10889  }
10890 
10891  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10892  if(m_MapCount != 0)
10893  {
10894  m_MapCount += count;
10895  VMA_ASSERT(m_pMappedData != VMA_NULL);
10896  if(ppData != VMA_NULL)
10897  {
10898  *ppData = m_pMappedData;
10899  }
10900  return VK_SUCCESS;
10901  }
10902  else
10903  {
10904  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10905  hAllocator->m_hDevice,
10906  m_hMemory,
10907  0, // offset
10908  VK_WHOLE_SIZE,
10909  0, // flags
10910  &m_pMappedData);
10911  if(result == VK_SUCCESS)
10912  {
10913  if(ppData != VMA_NULL)
10914  {
10915  *ppData = m_pMappedData;
10916  }
10917  m_MapCount = count;
10918  }
10919  return result;
10920  }
10921 }
10922 
10923 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10924 {
10925  if(count == 0)
10926  {
10927  return;
10928  }
10929 
10930  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10931  if(m_MapCount >= count)
10932  {
10933  m_MapCount -= count;
10934  if(m_MapCount == 0)
10935  {
10936  m_pMappedData = VMA_NULL;
10937  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10938  }
10939  }
10940  else
10941  {
10942  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10943  }
10944 }
10945 
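// With VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION enabled, every
// allocation is bracketed by magic values:
//   ... | margin + magic | allocation data | magic | ...
// written at (allocOffset - VMA_DEBUG_MARGIN) and (allocOffset + allocSize).
// CheckCorruption() later re-validates both to catch out-of-bounds writes.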
10946 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10947 {
10948  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10949  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10950 
10951  void* pData;
10952  VkResult res = Map(hAllocator, 1, &pData);
10953  if(res != VK_SUCCESS)
10954  {
10955  return res;
10956  }
10957 
10958  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10959  VmaWriteMagicValue(pData, allocOffset + allocSize);
10960 
10961  Unmap(hAllocator, 1);
10962 
10963  return VK_SUCCESS;
10964 }
10965 
10966 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10967 {
10968  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10969  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10970 
10971  void* pData;
10972  VkResult res = Map(hAllocator, 1, &pData);
10973  if(res != VK_SUCCESS)
10974  {
10975  return res;
10976  }
10977 
10978  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10979  {
10980  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10981  }
10982  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10983  {
10984  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10985  }
10986 
10987  Unmap(hAllocator, 1);
10988 
10989  return VK_SUCCESS;
10990 }
10991 
10992 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10993  const VmaAllocator hAllocator,
10994  const VmaAllocation hAllocation,
10995  VkBuffer hBuffer)
10996 {
10997  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10998  hAllocation->GetBlock() == this);
10999  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11000  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11001  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11002  hAllocator->m_hDevice,
11003  hBuffer,
11004  m_hMemory,
11005  hAllocation->GetOffset());
11006 }
11007 
11008 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11009  const VmaAllocator hAllocator,
11010  const VmaAllocation hAllocation,
11011  VkImage hImage)
11012 {
11013  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11014  hAllocation->GetBlock() == this);
11015  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11016  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11017  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11018  hAllocator->m_hDevice,
11019  hImage,
11020  m_hMemory,
11021  hAllocation->GetOffset());
11022 }
11023 
11024 static void InitStatInfo(VmaStatInfo& outInfo)
11025 {
11026  memset(&outInfo, 0, sizeof(outInfo));
11027  outInfo.allocationSizeMin = UINT64_MAX;
11028  outInfo.unusedRangeSizeMin = UINT64_MAX;
11029 }
11030 
11031 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11032 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11033 {
11034  inoutInfo.blockCount += srcInfo.blockCount;
11035  inoutInfo.allocationCount += srcInfo.allocationCount;
11036  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11037  inoutInfo.usedBytes += srcInfo.usedBytes;
11038  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11039  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11040  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11041  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11042  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11043 }
11044 
11045 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11046 {
11047  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11048  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11049  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11050  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11051 }
11052 
11053 VmaPool_T::VmaPool_T(
11054  VmaAllocator hAllocator,
11055  const VmaPoolCreateInfo& createInfo,
11056  VkDeviceSize preferredBlockSize) :
11057  m_BlockVector(
11058  hAllocator,
11059  createInfo.memoryTypeIndex,
11060  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11061  createInfo.minBlockCount,
11062  createInfo.maxBlockCount,
11063  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11064  createInfo.frameInUseCount,
11065  true, // isCustomPool
11066  createInfo.blockSize != 0, // explicitBlockSize
11067  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11068  m_Id(0)
11069 {
11070 }
11071 
11072 VmaPool_T::~VmaPool_T()
11073 {
11074 }
11075 
11076 #if VMA_STATS_STRING_ENABLED
11077 
11078 #endif // #if VMA_STATS_STRING_ENABLED
11079 
11080 VmaBlockVector::VmaBlockVector(
11081  VmaAllocator hAllocator,
11082  uint32_t memoryTypeIndex,
11083  VkDeviceSize preferredBlockSize,
11084  size_t minBlockCount,
11085  size_t maxBlockCount,
11086  VkDeviceSize bufferImageGranularity,
11087  uint32_t frameInUseCount,
11088  bool isCustomPool,
11089  bool explicitBlockSize,
11090  uint32_t algorithm) :
11091  m_hAllocator(hAllocator),
11092  m_MemoryTypeIndex(memoryTypeIndex),
11093  m_PreferredBlockSize(preferredBlockSize),
11094  m_MinBlockCount(minBlockCount),
11095  m_MaxBlockCount(maxBlockCount),
11096  m_BufferImageGranularity(bufferImageGranularity),
11097  m_FrameInUseCount(frameInUseCount),
11098  m_IsCustomPool(isCustomPool),
11099  m_ExplicitBlockSize(explicitBlockSize),
11100  m_Algorithm(algorithm),
11101  m_HasEmptyBlock(false),
11102  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11103  m_NextBlockId(0)
11104 {
11105 }
11106 
11107 VmaBlockVector::~VmaBlockVector()
11108 {
11109  for(size_t i = m_Blocks.size(); i--; )
11110  {
11111  m_Blocks[i]->Destroy(m_hAllocator);
11112  vma_delete(m_hAllocator, m_Blocks[i]);
11113  }
11114 }
11115 
11116 VkResult VmaBlockVector::CreateMinBlocks()
11117 {
11118  for(size_t i = 0; i < m_MinBlockCount; ++i)
11119  {
11120  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11121  if(res != VK_SUCCESS)
11122  {
11123  return res;
11124  }
11125  }
11126  return VK_SUCCESS;
11127 }
11128 
11129 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11130 {
11131  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11132 
11133  const size_t blockCount = m_Blocks.size();
11134 
11135  pStats->size = 0;
11136  pStats->unusedSize = 0;
11137  pStats->allocationCount = 0;
11138  pStats->unusedRangeCount = 0;
11139  pStats->unusedRangeSizeMax = 0;
11140  pStats->blockCount = blockCount;
11141 
11142  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11143  {
11144  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11145  VMA_ASSERT(pBlock);
11146  VMA_HEAVY_ASSERT(pBlock->Validate());
11147  pBlock->m_pMetadata->AddPoolStats(*pStats);
11148  }
11149 }
11150 
11151 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11152 {
11153  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11154  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11155  (VMA_DEBUG_MARGIN > 0) &&
11156  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11157 }
11158 
11159 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11160 
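// Allocation proceeds in up to three phases:
// 1. Try existing blocks (best-fit scans m_Blocks forward, worst/first-fit backward).
// 2. If allowed, create a new block; unless the block size is explicit, the first
//    blocks may start as small as 1/8 of the preferred size and grow from there.
// 3. If VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT was specified, retry up to
//    VMA_ALLOCATION_TRY_COUNT times while making other allocations lost.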
11161 VkResult VmaBlockVector::Allocate(
11162  VmaPool hCurrentPool,
11163  uint32_t currentFrameIndex,
11164  VkDeviceSize size,
11165  VkDeviceSize alignment,
11166  const VmaAllocationCreateInfo& createInfo,
11167  VmaSuballocationType suballocType,
11168  VmaAllocation* pAllocation)
11169 {
11170  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11171  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11172  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11173  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11174  const bool canCreateNewBlock =
11175  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11176  (m_Blocks.size() < m_MaxBlockCount);
11177  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11178 
11179  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
11180  // which in turn is available only when maxBlockCount = 1.
11181  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11182  {
11183  canMakeOtherLost = false;
11184  }
11185 
11186  // Upper address can only be used with linear allocator and within single memory block.
11187  if(isUpperAddress &&
11188  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11189  {
11190  return VK_ERROR_FEATURE_NOT_PRESENT;
11191  }
11192 
11193  // Validate strategy.
11194  switch(strategy)
11195  {
11196  case 0:
11197  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11198  break;
11199  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11200  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11202  break;
11203  default:
11204  return VK_ERROR_FEATURE_NOT_PRESENT;
11205  }
11206 
11207  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11208  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11209  {
11210  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11211  }
11212 
11213  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11214 
11215  /*
11216  Under certain conditions, this whole section can be skipped for optimization, so
11217  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11218  e.g. for custom pools with the linear algorithm.
11219  */
11220  if(!canMakeOtherLost || canCreateNewBlock)
11221  {
11222  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11223  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11224  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11225 
11226  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11227  {
11228  // Use only last block.
11229  if(!m_Blocks.empty())
11230  {
11231  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11232  VMA_ASSERT(pCurrBlock);
11233  VkResult res = AllocateFromBlock(
11234  pCurrBlock,
11235  hCurrentPool,
11236  currentFrameIndex,
11237  size,
11238  alignment,
11239  allocFlagsCopy,
11240  createInfo.pUserData,
11241  suballocType,
11242  strategy,
11243  pAllocation);
11244  if(res == VK_SUCCESS)
11245  {
11246  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11247  return VK_SUCCESS;
11248  }
11249  }
11250  }
11251  else
11252  {
11253  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11254  {
11255  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11256  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11257  {
11258  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11259  VMA_ASSERT(pCurrBlock);
11260  VkResult res = AllocateFromBlock(
11261  pCurrBlock,
11262  hCurrentPool,
11263  currentFrameIndex,
11264  size,
11265  alignment,
11266  allocFlagsCopy,
11267  createInfo.pUserData,
11268  suballocType,
11269  strategy,
11270  pAllocation);
11271  if(res == VK_SUCCESS)
11272  {
11273  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11274  return VK_SUCCESS;
11275  }
11276  }
11277  }
11278  else // WORST_FIT, FIRST_FIT
11279  {
11280  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11281  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11282  {
11283  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11284  VMA_ASSERT(pCurrBlock);
11285  VkResult res = AllocateFromBlock(
11286  pCurrBlock,
11287  hCurrentPool,
11288  currentFrameIndex,
11289  size,
11290  alignment,
11291  allocFlagsCopy,
11292  createInfo.pUserData,
11293  suballocType,
11294  strategy,
11295  pAllocation);
11296  if(res == VK_SUCCESS)
11297  {
11298  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11299  return VK_SUCCESS;
11300  }
11301  }
11302  }
11303  }
11304 
11305  // 2. Try to create new block.
11306  if(canCreateNewBlock)
11307  {
11308  // Calculate optimal size for new block.
11309  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11310  uint32_t newBlockSizeShift = 0;
11311  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11312 
11313  if(!m_ExplicitBlockSize)
11314  {
11315  // Allocate 1/8, 1/4, 1/2 as first blocks.
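// E.g. with m_PreferredBlockSize = 256 MiB and no existing blocks, a 4 MiB
// request creates a 32 MiB block first; later blocks grow to 64 MiB, 128 MiB,
// and finally the full preferred size.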
11316  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11317  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11318  {
11319  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11320  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11321  {
11322  newBlockSize = smallerNewBlockSize;
11323  ++newBlockSizeShift;
11324  }
11325  else
11326  {
11327  break;
11328  }
11329  }
11330  }
11331 
11332  size_t newBlockIndex = 0;
11333  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11334  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11335  if(!m_ExplicitBlockSize)
11336  {
11337  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11338  {
11339  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11340  if(smallerNewBlockSize >= size)
11341  {
11342  newBlockSize = smallerNewBlockSize;
11343  ++newBlockSizeShift;
11344  res = CreateBlock(newBlockSize, &newBlockIndex);
11345  }
11346  else
11347  {
11348  break;
11349  }
11350  }
11351  }
11352 
11353  if(res == VK_SUCCESS)
11354  {
11355  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11356  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11357 
11358  res = AllocateFromBlock(
11359  pBlock,
11360  hCurrentPool,
11361  currentFrameIndex,
11362  size,
11363  alignment,
11364  allocFlagsCopy,
11365  createInfo.pUserData,
11366  suballocType,
11367  strategy,
11368  pAllocation);
11369  if(res == VK_SUCCESS)
11370  {
11371  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11372  return VK_SUCCESS;
11373  }
11374  else
11375  {
11376  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11377  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11378  }
11379  }
11380  }
11381  }
11382 
11383  // 3. Try to allocate from existing blocks with making other allocations lost.
11384  if(canMakeOtherLost)
11385  {
11386  uint32_t tryIndex = 0;
11387  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11388  {
11389  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11390  VmaAllocationRequest bestRequest = {};
11391  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11392 
11393  // 1. Search existing allocations.
11394  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11395  {
11396  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11397  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11398  {
11399  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11400  VMA_ASSERT(pCurrBlock);
11401  VmaAllocationRequest currRequest = {};
11402  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11403  currentFrameIndex,
11404  m_FrameInUseCount,
11405  m_BufferImageGranularity,
11406  size,
11407  alignment,
11408  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11409  suballocType,
11410  canMakeOtherLost,
11411  strategy,
11412  &currRequest))
11413  {
11414  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11415  if(pBestRequestBlock == VMA_NULL ||
11416  currRequestCost < bestRequestCost)
11417  {
11418  pBestRequestBlock = pCurrBlock;
11419  bestRequest = currRequest;
11420  bestRequestCost = currRequestCost;
11421 
11422  if(bestRequestCost == 0)
11423  {
11424  break;
11425  }
11426  }
11427  }
11428  }
11429  }
11430  else // WORST_FIT, FIRST_FIT
11431  {
11432  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11433  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11434  {
11435  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11436  VMA_ASSERT(pCurrBlock);
11437  VmaAllocationRequest currRequest = {};
11438  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11439  currentFrameIndex,
11440  m_FrameInUseCount,
11441  m_BufferImageGranularity,
11442  size,
11443  alignment,
11444  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11445  suballocType,
11446  canMakeOtherLost,
11447  strategy,
11448  &currRequest))
11449  {
11450  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11451  if(pBestRequestBlock == VMA_NULL ||
11452  currRequestCost < bestRequestCost ||
11453  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11454  {
11455  pBestRequestBlock = pCurrBlock;
11456  bestRequest = currRequest;
11457  bestRequestCost = currRequestCost;
11458 
11459  if(bestRequestCost == 0 ||
11460  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11461  {
11462  break;
11463  }
11464  }
11465  }
11466  }
11467  }
11468 
11469  if(pBestRequestBlock != VMA_NULL)
11470  {
11471  if(mapped)
11472  {
11473  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11474  if(res != VK_SUCCESS)
11475  {
11476  return res;
11477  }
11478  }
11479 
11480  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11481  currentFrameIndex,
11482  m_FrameInUseCount,
11483  &bestRequest))
11484  {
11485  // We no longer have an empty Allocation.
11486  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11487  {
11488  m_HasEmptyBlock = false;
11489  }
11490  // Allocate from this pBlock.
11491  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11492  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11493  (*pAllocation)->InitBlockAllocation(
11494  hCurrentPool,
11495  pBestRequestBlock,
11496  bestRequest.offset,
11497  alignment,
11498  size,
11499  suballocType,
11500  mapped,
11501  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11502  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11503  VMA_DEBUG_LOG(" Returned from existing block");
11504  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11505  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11506  {
11507  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11508  }
11509  if(IsCorruptionDetectionEnabled())
11510  {
11511  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11512  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11513  }
11514  return VK_SUCCESS;
11515  }
11516  // else: Some allocations must have been touched while we are here. Next try.
11517  }
11518  else
11519  {
11520  // Could not find place in any of the blocks - break outer loop.
11521  break;
11522  }
11523  }
11524  /* Maximum number of tries exceeded - a very unlikely event when many other
11525  threads are simultaneously touching allocations, making it impossible to make
11526  them lost at the same time as we try to allocate. */
11527  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11528  {
11529  return VK_ERROR_TOO_MANY_OBJECTS;
11530  }
11531  }
11532 
11533  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11534 }
11535 
11536 void VmaBlockVector::Free(
11537  VmaAllocation hAllocation)
11538 {
11539  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11540 
11541  // Scope for lock.
11542  {
11543  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11544 
11545  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11546 
11547  if(IsCorruptionDetectionEnabled())
11548  {
11549  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11550  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11551  }
11552 
11553  if(hAllocation->IsPersistentMap())
11554  {
11555  pBlock->Unmap(m_hAllocator, 1);
11556  }
11557 
11558  pBlock->m_pMetadata->Free(hAllocation);
11559  VMA_HEAVY_ASSERT(pBlock->Validate());
11560 
11561  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11562 
11563  // pBlock became empty after this deallocation.
11564  if(pBlock->m_pMetadata->IsEmpty())
11565  {
11566  // Already has an empty Allocation. We don't want to have two, so delete this one.
11567  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11568  {
11569  pBlockToDelete = pBlock;
11570  Remove(pBlock);
11571  }
11572  // We now have the first empty block.
11573  else
11574  {
11575  m_HasEmptyBlock = true;
11576  }
11577  }
11578  // pBlock didn't become empty, but we have another empty block - find and free that one.
11579  // (This is optional, heuristics.)
11580  else if(m_HasEmptyBlock)
11581  {
11582  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11583  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11584  {
11585  pBlockToDelete = pLastBlock;
11586  m_Blocks.pop_back();
11587  m_HasEmptyBlock = false;
11588  }
11589  }
11590 
11591  IncrementallySortBlocks();
11592  }
11593 
11594  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11595  // lock, for performance reasons.
11596  if(pBlockToDelete != VMA_NULL)
11597  {
11598  VMA_DEBUG_LOG(" Deleted empty allocation");
11599  pBlockToDelete->Destroy(m_hAllocator);
11600  vma_delete(m_hAllocator, pBlockToDelete);
11601  }
11602 }
11603 
11604 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11605 {
11606  VkDeviceSize result = 0;
11607  for(size_t i = m_Blocks.size(); i--; )
11608  {
11609  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11610  if(result >= m_PreferredBlockSize)
11611  {
11612  break;
11613  }
11614  }
11615  return result;
11616 }
11617 
11618 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11619 {
11620  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11621  {
11622  if(m_Blocks[blockIndex] == pBlock)
11623  {
11624  VmaVectorRemove(m_Blocks, blockIndex);
11625  return;
11626  }
11627  }
11628  VMA_ASSERT(0);
11629 }
11630 
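// Keeps m_Blocks approximately sorted by ascending sum of free space, doing at
// most one swap per call (an amortized bubble sort), so the forward best-fit
// scan in Allocate() visits the fullest blocks first. Skipped for the linear
// algorithm, which only ever allocates from the last block.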
11631 void VmaBlockVector::IncrementallySortBlocks()
11632 {
11633  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11634  {
11635  // Bubble sort only until first swap.
11636  for(size_t i = 1; i < m_Blocks.size(); ++i)
11637  {
11638  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11639  {
11640  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11641  return;
11642  }
11643  }
11644  }
11645 }
11646 
11647 VkResult VmaBlockVector::AllocateFromBlock(
11648  VmaDeviceMemoryBlock* pBlock,
11649  VmaPool hCurrentPool,
11650  uint32_t currentFrameIndex,
11651  VkDeviceSize size,
11652  VkDeviceSize alignment,
11653  VmaAllocationCreateFlags allocFlags,
11654  void* pUserData,
11655  VmaSuballocationType suballocType,
11656  uint32_t strategy,
11657  VmaAllocation* pAllocation)
11658 {
11659  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11660  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11661  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11662  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11663 
11664  VmaAllocationRequest currRequest = {};
11665  if(pBlock->m_pMetadata->CreateAllocationRequest(
11666  currentFrameIndex,
11667  m_FrameInUseCount,
11668  m_BufferImageGranularity,
11669  size,
11670  alignment,
11671  isUpperAddress,
11672  suballocType,
11673  false, // canMakeOtherLost
11674  strategy,
11675  &currRequest))
11676  {
11677  // Allocate from pBlock.
11678  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11679 
11680  if(mapped)
11681  {
11682  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11683  if(res != VK_SUCCESS)
11684  {
11685  return res;
11686  }
11687  }
11688 
11689  // This block was the empty one - after this allocation it no longer is.
11690  if(pBlock->m_pMetadata->IsEmpty())
11691  {
11692  m_HasEmptyBlock = false;
11693  }
11694 
11695  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11696  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11697  (*pAllocation)->InitBlockAllocation(
11698  hCurrentPool,
11699  pBlock,
11700  currRequest.offset,
11701  alignment,
11702  size,
11703  suballocType,
11704  mapped,
11705  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11706  VMA_HEAVY_ASSERT(pBlock->Validate());
11707  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11708  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11709  {
11710  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11711  }
11712  if(IsCorruptionDetectionEnabled())
11713  {
11714  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11715  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11716  }
11717  return VK_SUCCESS;
11718  }
11719  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11720 }
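// Note: VK_ERROR_OUT_OF_DEVICE_MEMORY returned above only means "no suitable
// free space in this particular block" - the caller is expected to try other
// blocks or create a new one before propagating an error to the user.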
11721 
11722 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11723 {
11724  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11725  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11726  allocInfo.allocationSize = blockSize;
11727  VkDeviceMemory mem = VK_NULL_HANDLE;
11728  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11729  if(res < 0)
11730  {
11731  return res;
11732  }
11733 
11734  // New VkDeviceMemory successfully created.
11735 
11736  // Create new Allocation for it.
11737  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11738  pBlock->Init(
11739  m_hAllocator,
11740  m_MemoryTypeIndex,
11741  mem,
11742  allocInfo.allocationSize,
11743  m_NextBlockId++,
11744  m_Algorithm);
11745 
11746  m_Blocks.push_back(pBlock);
11747  if(pNewBlockIndex != VMA_NULL)
11748  {
11749  *pNewBlockIndex = m_Blocks.size() - 1;
11750  }
11751 
11752  return VK_SUCCESS;
11753 }
11754 
11755 void VmaBlockVector::ApplyDefragmentationMovesCpu(
11756  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11757  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
11758 {
11759  const size_t blockCount = m_Blocks.size();
11760  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
11761 
11762  enum BLOCK_FLAG
11763  {
11764  BLOCK_FLAG_USED = 0x00000001,
11765  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
11766  };
11767 
11768  struct BlockInfo
11769  {
11770  uint32_t flags;
11771  void* pMappedData;
11772  };
11773  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
11774  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
11775  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
11776 
11777  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11778  const size_t moveCount = moves.size();
11779  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11780  {
11781  const VmaDefragmentationMove& move = moves[moveIndex];
11782  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
11783  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
11784  }
11785 
11786  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11787 
11788  // Go over all blocks. Get mapped pointer or map if necessary.
11789  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11790  {
11791  BlockInfo& currBlockInfo = blockInfo[blockIndex];
11792  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11793  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
11794  {
11795  currBlockInfo.pMappedData = pBlock->GetMappedData();
11796  // If it is not already mapped, map it now.
11797  if(currBlockInfo.pMappedData == VMA_NULL)
11798  {
11799  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
11800  if(pDefragCtx->res == VK_SUCCESS)
11801  {
11802  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
11803  }
11804  }
11805  }
11806  }
11807 
11808  // Go over all moves. Do actual data transfer.
11809  if(pDefragCtx->res == VK_SUCCESS)
11810  {
11811  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11812  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
11813 
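 // Example: with nonCoherentAtomSize = 64, a move with srcOffset = 100 and
 // size = 200 gets memRange.offset = VmaAlignDown(100, 64) = 64 and
 // memRange.size = VmaAlignUp(200 + (100 - 64), 64) = 256 (clamped to the end
 // of the block), so the invalidated/flushed range always covers the moved bytes.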
11814  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11815  {
11816  const VmaDefragmentationMove& move = moves[moveIndex];
11817 
11818  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
11819  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
11820 
11821  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
11822 
11823  // Invalidate source.
11824  if(isNonCoherent)
11825  {
11826  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
11827  memRange.memory = pSrcBlock->GetDeviceMemory();
11828  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
11829  memRange.size = VMA_MIN(
11830  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
11831  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
11832  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
11833  }
11834 
11835  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11836  memmove(
11837  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
11838  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
11839  static_cast<size_t>(move.size));
11840 
11841  if(IsCorruptionDetectionEnabled())
11842  {
11843  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
11844  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
11845  }
11846 
11847  // Flush destination.
11848  if(isNonCoherent)
11849  {
11850  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
11851  memRange.memory = pDstBlock->GetDeviceMemory();
11852  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
11853  memRange.size = VMA_MIN(
11854  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
11855  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
11856  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
11857  }
11858  }
11859  }
11860 
11861  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
11862  // Regardless of pCtx->res == VK_SUCCESS.
11863  for(size_t blockIndex = blockCount; blockIndex--; )
11864  {
11865  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
11866  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
11867  {
11868  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11869  pBlock->Unmap(m_hAllocator, 1);
11870  }
11871  }
11872 }
11873 
11874 void VmaBlockVector::ApplyDefragmentationMovesGpu(
11875  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11876  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
11877  VkCommandBuffer commandBuffer)
11878 {
11879  const size_t blockCount = m_Blocks.size();
11880 
11881  pDefragCtx->blockContexts.resize(blockCount);
11882  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
11883 
11884  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11885  const size_t moveCount = moves.size();
11886  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11887  {
11888  const VmaDefragmentationMove& move = moves[moveIndex];
11889  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11890  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11891  }
11892 
11893  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11894 
11895  // Go over all blocks. Create and bind a buffer for the whole block if necessary.
11896  {
11897  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
11898  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
11899  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
11900 
11901  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11902  {
11903  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
11904  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11905  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
11906  {
11907  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
11908  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
11909  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
11910  if(pDefragCtx->res == VK_SUCCESS)
11911  {
11912  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
11913  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
11914  }
11915  }
11916  }
11917  }
11918 
11919  // Go over all moves. Post data transfer commands to command buffer.
11920  if(pDefragCtx->res == VK_SUCCESS)
11921  {
11922  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11923  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
11924 
11925  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11926  {
11927  const VmaDefragmentationMove& move = moves[moveIndex];
11928 
11929  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
11930  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
11931 
11932  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
11933 
11934  VkBufferCopy region = {
11935  move.srcOffset,
11936  move.dstOffset,
11937  move.size };
11938  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
11939  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
11940  }
11941  }
11942 
11943  // Buffers are kept in the defrag context for later destruction. VK_NOT_READY tells the caller that the recorded command buffer must be executed before defragmentation can finish.
11944  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
11945  {
11946  pDefragCtx->res = VK_NOT_READY;
11947  }
11948 }
11949 
11950 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
11951 {
11952  m_HasEmptyBlock = false;
11953  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11954  {
11955  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11956  if(pBlock->m_pMetadata->IsEmpty())
11957  {
11958  if(m_Blocks.size() > m_MinBlockCount)
11959  {
11960  if(pDefragmentationStats != VMA_NULL)
11961  {
11962  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11963  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11964  }
11965 
11966  VmaVectorRemove(m_Blocks, blockIndex);
11967  pBlock->Destroy(m_hAllocator);
11968  vma_delete(m_hAllocator, pBlock);
11969  }
11970  else
11971  {
11972  m_HasEmptyBlock = true;
11973  }
11974  }
11975  }
11976 }
11977 
11978 #if VMA_STATS_STRING_ENABLED
11979 
11980 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
11981 {
11982  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11983 
11984  json.BeginObject();
11985 
11986  if(m_IsCustomPool)
11987  {
11988  json.WriteString("MemoryTypeIndex");
11989  json.WriteNumber(m_MemoryTypeIndex);
11990 
11991  json.WriteString("BlockSize");
11992  json.WriteNumber(m_PreferredBlockSize);
11993 
11994  json.WriteString("BlockCount");
11995  json.BeginObject(true);
11996  if(m_MinBlockCount > 0)
11997  {
11998  json.WriteString("Min");
11999  json.WriteNumber((uint64_t)m_MinBlockCount);
12000  }
12001  if(m_MaxBlockCount < SIZE_MAX)
12002  {
12003  json.WriteString("Max");
12004  json.WriteNumber((uint64_t)m_MaxBlockCount);
12005  }
12006  json.WriteString("Cur");
12007  json.WriteNumber((uint64_t)m_Blocks.size());
12008  json.EndObject();
12009 
12010  if(m_FrameInUseCount > 0)
12011  {
12012  json.WriteString("FrameInUseCount");
12013  json.WriteNumber(m_FrameInUseCount);
12014  }
12015 
12016  if(m_Algorithm != 0)
12017  {
12018  json.WriteString("Algorithm");
12019  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12020  }
12021  }
12022  else
12023  {
12024  json.WriteString("PreferredBlockSize");
12025  json.WriteNumber(m_PreferredBlockSize);
12026  }
12027 
12028  json.WriteString("Blocks");
12029  json.BeginObject();
12030  for(size_t i = 0; i < m_Blocks.size(); ++i)
12031  {
12032  json.BeginString();
12033  json.ContinueString(m_Blocks[i]->GetId());
12034  json.EndString();
12035 
12036  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12037  }
12038  json.EndObject();
12039 
12040  json.EndObject();
12041 }
12042 
12043 #endif // #if VMA_STATS_STRING_ENABLED
12044 
12045 void VmaBlockVector::Defragment(
12046  class VmaBlockVectorDefragmentationContext* pCtx,
12047  VmaDefragmentationStats* pStats,
12048  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12049  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12050  VkCommandBuffer commandBuffer)
12051 {
12052  pCtx->res = VK_SUCCESS;
12053 
12054  const VkMemoryPropertyFlags memPropFlags =
12055  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12056  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12057  (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12058  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0;
12059 
12060  // There are options to defragment this memory type.
12061  if(canDefragmentOnCpu || canDefragmentOnGpu)
12062  {
12063  bool defragmentOnGpu;
12064  // There is only one option to defragment this memory type.
12065  if(canDefragmentOnGpu != canDefragmentOnCpu)
12066  {
12067  defragmentOnGpu = canDefragmentOnGpu;
12068  }
12069  // Both options are available: Heuristics to choose the best one.
12070  else
12071  {
12072  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12073  m_hAllocator->IsIntegratedGpu();
12074  }
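 // Rationale: DEVICE_LOCAL memory is usually slow (or impossible) to access
 // through a CPU mapping, so a GPU-side copy is preferred there; on an
 // integrated GPU all memory is shared, so the GPU transfer path is assumed
 // to be at least as cheap.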
12075 
12076  bool overlappingMoveSupported = !defragmentOnGpu;
12077 
12078  if(m_hAllocator->m_UseMutex)
12079  {
12080  m_Mutex.LockWrite();
12081  pCtx->mutexLocked = true;
12082  }
12083 
12084  pCtx->Begin(overlappingMoveSupported);
12085 
12086  // Defragment.
12087 
12088  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12089  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12090  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12091  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12092  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12093 
12094  // Accumulate statistics.
12095  if(pStats != VMA_NULL)
12096  {
12097  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12098  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12099  pStats->bytesMoved += bytesMoved;
12100  pStats->allocationsMoved += allocationsMoved;
12101  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12102  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12103  if(defragmentOnGpu)
12104  {
12105  maxGpuBytesToMove -= bytesMoved;
12106  maxGpuAllocationsToMove -= allocationsMoved;
12107  }
12108  else
12109  {
12110  maxCpuBytesToMove -= bytesMoved;
12111  maxCpuAllocationsToMove -= allocationsMoved;
12112  }
12113  }
12114 
12115  if(pCtx->res >= VK_SUCCESS)
12116  {
12117  if(defragmentOnGpu)
12118  {
12119  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12120  }
12121  else
12122  {
12123  ApplyDefragmentationMovesCpu(pCtx, moves);
12124  }
12125  }
12126  }
12127 }
12128 
12129 void VmaBlockVector::DefragmentationEnd(
12130  class VmaBlockVectorDefragmentationContext* pCtx,
12131  VmaDefragmentationStats* pStats)
12132 {
12133  // Destroy buffers.
12134  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12135  {
12136  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12137  if(blockCtx.hBuffer)
12138  {
12139  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12140  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12141  }
12142  }
12143 
12144  if(pCtx->res >= VK_SUCCESS)
12145  {
12146  FreeEmptyBlocks(pStats);
12147  }
12148 
12149  if(pCtx->mutexLocked)
12150  {
12151  VMA_ASSERT(m_hAllocator->m_UseMutex);
12152  m_Mutex.UnlockWrite();
12153  }
12154 }
12155 
12156 size_t VmaBlockVector::CalcAllocationCount() const
12157 {
12158  size_t result = 0;
12159  for(size_t i = 0; i < m_Blocks.size(); ++i)
12160  {
12161  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12162  }
12163  return result;
12164 }
12165 
12166 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12167 {
12168  if(m_BufferImageGranularity == 1)
12169  {
12170  return false;
12171  }
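 // m_BufferImageGranularity comes from the device limit
 // VkPhysicalDeviceLimits::bufferImageGranularity; when it is 1, linear and
 // optimal-tiling resources never need extra padding between them, so a
 // granularity conflict is impossible.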
12172  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12173  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12174  {
12175  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12176  VMA_ASSERT(m_Algorithm == 0);
12177  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12178  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12179  {
12180  return true;
12181  }
12182  }
12183  return false;
12184 }
12185 
12186 void VmaBlockVector::MakePoolAllocationsLost(
12187  uint32_t currentFrameIndex,
12188  size_t* pLostAllocationCount)
12189 {
12190  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12191  size_t lostAllocationCount = 0;
12192  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12193  {
12194  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12195  VMA_ASSERT(pBlock);
12196  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12197  }
12198  if(pLostAllocationCount != VMA_NULL)
12199  {
12200  *pLostAllocationCount = lostAllocationCount;
12201  }
12202 }
12203 
12204 VkResult VmaBlockVector::CheckCorruption()
12205 {
12206  if(!IsCorruptionDetectionEnabled())
12207  {
12208  return VK_ERROR_FEATURE_NOT_PRESENT;
12209  }
12210 
12211  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12212  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12213  {
12214  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12215  VMA_ASSERT(pBlock);
12216  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12217  if(res != VK_SUCCESS)
12218  {
12219  return res;
12220  }
12221  }
12222  return VK_SUCCESS;
12223 }
12224 
12225 void VmaBlockVector::AddStats(VmaStats* pStats)
12226 {
12227  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12228  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12229 
12230  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12231 
12232  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12233  {
12234  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12235  VMA_ASSERT(pBlock);
12236  VMA_HEAVY_ASSERT(pBlock->Validate());
12237  VmaStatInfo allocationStatInfo;
12238  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12239  VmaAddStatInfo(pStats->total, allocationStatInfo);
12240  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12241  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12242  }
12243 }
12244 
12245 ////////////////////////////////////////////////////////////////////////////////
12246 // VmaDefragmentationAlgorithm_Generic members definition
12247 
12248 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12249  VmaAllocator hAllocator,
12250  VmaBlockVector* pBlockVector,
12251  uint32_t currentFrameIndex,
12252  bool overlappingMoveSupported) :
12253  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12254  m_AllAllocations(false),
12255  m_AllocationCount(0),
12256  m_BytesMoved(0),
12257  m_AllocationsMoved(0),
12258  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12259 {
12260  // Create block info for each block.
12261  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12262  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12263  {
12264  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12265  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12266  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12267  m_Blocks.push_back(pBlockInfo);
12268  }
12269 
12270  // Sort them by m_pBlock pointer value.
12271  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12272 }
12273 
12274 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12275 {
12276  for(size_t i = m_Blocks.size(); i--; )
12277  {
12278  vma_delete(m_hAllocator, m_Blocks[i]);
12279  }
12280 }
12281 
12282 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12283 {
12284  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation has not been lost.
12285  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12286  {
12287  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12288  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12289  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12290  {
12291  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12292  (*it)->m_Allocations.push_back(allocInfo);
12293  }
12294  else
12295  {
12296  VMA_ASSERT(0);
12297  }
12298 
12299  ++m_AllocationCount;
12300  }
12301 }
12302 
12303 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12304  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12305  VkDeviceSize maxBytesToMove,
12306  uint32_t maxAllocationsToMove)
12307 {
12308  if(m_Blocks.empty())
12309  {
12310  return VK_SUCCESS;
12311  }
12312 
12313  // This is a choice based on research.
12314  // Option 1:
12315  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12316  // Option 2:
12317  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12318  // Option 3:
12319  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12320 
12321  size_t srcBlockMinIndex = 0;
12322  // With FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations.
12323  /*
12324  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12325  {
12326  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12327  if(blocksWithNonMovableCount > 0)
12328  {
12329  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12330  }
12331  }
12332  */
12333 
12334  size_t srcBlockIndex = m_Blocks.size() - 1;
12335  size_t srcAllocIndex = SIZE_MAX;
12336  for(;;)
12337  {
12338  // 1. Find next allocation to move.
12339  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12340  // 1.2. Then start from last to first m_Allocations.
12341  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12342  {
12343  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12344  {
12345  // Finished: no more allocations to process.
12346  if(srcBlockIndex == srcBlockMinIndex)
12347  {
12348  return VK_SUCCESS;
12349  }
12350  else
12351  {
12352  --srcBlockIndex;
12353  srcAllocIndex = SIZE_MAX;
12354  }
12355  }
12356  else
12357  {
12358  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12359  }
12360  }
12361 
12362  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12363  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12364 
12365  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12366  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12367  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12368  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12369 
12370  // 2. Try to find new place for this allocation in preceding or current block.
12371  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12372  {
12373  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12374  VmaAllocationRequest dstAllocRequest;
12375  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12376  m_CurrentFrameIndex,
12377  m_pBlockVector->GetFrameInUseCount(),
12378  m_pBlockVector->GetBufferImageGranularity(),
12379  size,
12380  alignment,
12381  false, // upperAddress
12382  suballocType,
12383  false, // canMakeOtherLost
12384  strategy,
12385  &dstAllocRequest) &&
12386  MoveMakesSense(
12387  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12388  {
12389  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12390 
12391  // Reached limit on number of allocations or bytes to move.
12392  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12393  (m_BytesMoved + size > maxBytesToMove))
12394  {
12395  return VK_SUCCESS;
12396  }
12397 
12398  VmaDefragmentationMove move;
12399  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12400  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12401  move.srcOffset = srcOffset;
12402  move.dstOffset = dstAllocRequest.offset;
12403  move.size = size;
12404  moves.push_back(move);
12405 
12406  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12407  dstAllocRequest,
12408  suballocType,
12409  size,
12410  false, // upperAddress
12411  allocInfo.m_hAllocation);
12412  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12413 
12414  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12415 
12416  if(allocInfo.m_pChanged != VMA_NULL)
12417  {
12418  *allocInfo.m_pChanged = VK_TRUE;
12419  }
12420 
12421  ++m_AllocationsMoved;
12422  m_BytesMoved += size;
12423 
12424  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12425 
12426  break;
12427  }
12428  }
12429 
12430  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
12431 
12432  if(srcAllocIndex > 0)
12433  {
12434  --srcAllocIndex;
12435  }
12436  else
12437  {
12438  if(srcBlockIndex > 0)
12439  {
12440  --srcBlockIndex;
12441  srcAllocIndex = SIZE_MAX;
12442  }
12443  else
12444  {
12445  return VK_SUCCESS;
12446  }
12447  }
12448  }
12449 }
12450 
12451 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12452 {
12453  size_t result = 0;
12454  for(size_t i = 0; i < m_Blocks.size(); ++i)
12455  {
12456  if(m_Blocks[i]->m_HasNonMovableAllocations)
12457  {
12458  ++result;
12459  }
12460  }
12461  return result;
12462 }
12463 
12464 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12465  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12466  VkDeviceSize maxBytesToMove,
12467  uint32_t maxAllocationsToMove)
12468 {
12469  if(!m_AllAllocations && m_AllocationCount == 0)
12470  {
12471  return VK_SUCCESS;
12472  }
12473 
12474  const size_t blockCount = m_Blocks.size();
12475  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12476  {
12477  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12478 
12479  if(m_AllAllocations)
12480  {
12481  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12482  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12483  it != pMetadata->m_Suballocations.end();
12484  ++it)
12485  {
12486  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12487  {
12488  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12489  pBlockInfo->m_Allocations.push_back(allocInfo);
12490  }
12491  }
12492  }
12493 
12494  pBlockInfo->CalcHasNonMovableAllocations();
12495 
12496  // This is a choice based on research.
12497  // Option 1:
12498  pBlockInfo->SortAllocationsByOffsetDescending();
12499  // Option 2:
12500  //pBlockInfo->SortAllocationsBySizeDescending();
12501  }
12502 
12503  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12504  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12505 
12506  // This is a choice based on research.
12507  const uint32_t roundCount = 2;
12508 
12509  // Execute defragmentation rounds (the main part).
12510  VkResult result = VK_SUCCESS;
12511  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12512  {
12513  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12514  }
12515 
12516  return result;
12517 }
12518 
12519 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12520  size_t dstBlockIndex, VkDeviceSize dstOffset,
12521  size_t srcBlockIndex, VkDeviceSize srcOffset)
12522 {
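 // A move "makes sense" only if it brings the allocation strictly closer to the
 // front of the pool: to a lower block index, or to a lower offset within the
 // same block - i.e. a lexicographic "<" on (blockIndex, offset).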
12523  if(dstBlockIndex < srcBlockIndex)
12524  {
12525  return true;
12526  }
12527  if(dstBlockIndex > srcBlockIndex)
12528  {
12529  return false;
12530  }
12531  if(dstOffset < srcOffset)
12532  {
12533  return true;
12534  }
12535  return false;
12536 }
12537 
12538 ////////////////////////////////////////////////////////////////////////////////
12539 // VmaDefragmentationAlgorithm_Fast
12540 
12541 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12542  VmaAllocator hAllocator,
12543  VmaBlockVector* pBlockVector,
12544  uint32_t currentFrameIndex,
12545  bool overlappingMoveSupported) :
12546  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12547  m_OverlappingMoveSupported(overlappingMoveSupported),
12548  m_AllocationCount(0),
12549  m_AllAllocations(false),
12550  m_BytesMoved(0),
12551  m_AllocationsMoved(0),
12552  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12553 {
12554  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12555 
12556 }
12557 
12558 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12559 {
12560 }
12561 
12562 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12563  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12564  VkDeviceSize maxBytesToMove,
12565  uint32_t maxAllocationsToMove)
12566 {
12567  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12568 
12569  const size_t blockCount = m_pBlockVector->GetBlockCount();
12570  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12571  {
12572  return VK_SUCCESS;
12573  }
12574 
12575  PreprocessMetadata();
12576 
12577  // Sort blocks in order from most "destination" to most "source".
12578 
12579  m_BlockInfos.resize(blockCount);
12580  for(size_t i = 0; i < blockCount; ++i)
12581  {
12582  m_BlockInfos[i].origBlockIndex = i;
12583  }
12584 
12585  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12586  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12587  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12588  });
12589 
12590  // THE MAIN ALGORITHM
12591 
12592  FreeSpaceDatabase freeSpaceDb;
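 // The compaction keeps one forward write cursor (dstBlockInfoIndex, dstOffset)
 // and walks all allocations in block order, packing each one at the lowest
 // offset where it fits. Gaps that have to be skipped over are remembered in
 // freeSpaceDb and reused later for allocations small enough to fit in them.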
12593 
12594  size_t dstBlockInfoIndex = 0;
12595  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12596  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12597  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12598  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12599  VkDeviceSize dstOffset = 0;
12600 
12601  bool end = false;
12602  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12603  {
12604  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12605  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12606  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12607  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12608  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12609  {
12610  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12611  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12612  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12613  if(m_AllocationsMoved == maxAllocationsToMove ||
12614  m_BytesMoved + srcAllocSize > maxBytesToMove)
12615  {
12616  end = true;
12617  break;
12618  }
12619  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12620 
12621  // Try to place it in one of the free spaces from the database.
12622  size_t freeSpaceInfoIndex;
12623  VkDeviceSize dstAllocOffset;
12624  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12625  freeSpaceInfoIndex, dstAllocOffset))
12626  {
12627  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12628  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12629  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12630  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12631 
12632  // Same block
12633  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12634  {
12635  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12636 
12637  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12638 
12639  VmaSuballocation suballoc = *srcSuballocIt;
12640  suballoc.offset = dstAllocOffset;
12641  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12642  m_BytesMoved += srcAllocSize;
12643  ++m_AllocationsMoved;
12644 
12645  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12646  ++nextSuballocIt;
12647  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12648  srcSuballocIt = nextSuballocIt;
12649 
12650  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12651 
12652  VmaDefragmentationMove move = {
12653  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12654  srcAllocOffset, dstAllocOffset,
12655  srcAllocSize };
12656  moves.push_back(move);
12657  }
12658  // Different block
12659  else
12660  {
12661  // MOVE OPTION 2: Move the allocation to a different block.
12662 
12663  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12664 
12665  VmaSuballocation suballoc = *srcSuballocIt;
12666  suballoc.offset = dstAllocOffset;
12667  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12668  m_BytesMoved += srcAllocSize;
12669  ++m_AllocationsMoved;
12670 
12671  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12672  ++nextSuballocIt;
12673  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12674  srcSuballocIt = nextSuballocIt;
12675 
12676  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12677 
12678  VmaDefragmentationMove move = {
12679  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12680  srcAllocOffset, dstAllocOffset,
12681  srcAllocSize };
12682  moves.push_back(move);
12683  }
12684  }
12685  else
12686  {
12687  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12688 
12689  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
12690  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12691  dstAllocOffset + srcAllocSize > dstBlockSize)
12692  {
12693  // But before that, register remaining free space at the end of dst block.
12694  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12695 
12696  ++dstBlockInfoIndex;
12697  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12698  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12699  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12700  dstBlockSize = pDstMetadata->GetSize();
12701  dstOffset = 0;
12702  dstAllocOffset = 0;
12703  }
12704 
12705  // Same block
12706  if(dstBlockInfoIndex == srcBlockInfoIndex)
12707  {
12708  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12709 
12710  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12711 
12712  bool skipOver = overlap;
12713  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12714  {
12715  // If the destination and source places overlap, skip the move if it would
12716  // shift the allocation by less than 1/64 of its size.
12717  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12718  }
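 // Example: an overlapping 64 KiB allocation is moved only if doing so shifts
 // it down by at least 64 KiB / 64 = 1 KiB; a smaller gain is not worth the copy.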
12719 
12720  if(skipOver)
12721  {
12722  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12723 
12724  dstOffset = srcAllocOffset + srcAllocSize;
12725  ++srcSuballocIt;
12726  }
12727  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12728  else
12729  {
12730  srcSuballocIt->offset = dstAllocOffset;
12731  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12732  dstOffset = dstAllocOffset + srcAllocSize;
12733  m_BytesMoved += srcAllocSize;
12734  ++m_AllocationsMoved;
12735  ++srcSuballocIt;
12736  VmaDefragmentationMove move = {
12737  srcOrigBlockIndex, dstOrigBlockIndex,
12738  srcAllocOffset, dstAllocOffset,
12739  srcAllocSize };
12740  moves.push_back(move);
12741  }
12742  }
12743  // Different block
12744  else
12745  {
12746  // MOVE OPTION 2: Move the allocation to a different block.
12747 
12748  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12749  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12750 
12751  VmaSuballocation suballoc = *srcSuballocIt;
12752  suballoc.offset = dstAllocOffset;
12753  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12754  dstOffset = dstAllocOffset + srcAllocSize;
12755  m_BytesMoved += srcAllocSize;
12756  ++m_AllocationsMoved;
12757 
12758  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12759  ++nextSuballocIt;
12760  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12761  srcSuballocIt = nextSuballocIt;
12762 
12763  pDstMetadata->m_Suballocations.push_back(suballoc);
12764 
12765  VmaDefragmentationMove move = {
12766  srcOrigBlockIndex, dstOrigBlockIndex,
12767  srcAllocOffset, dstAllocOffset,
12768  srcAllocSize };
12769  moves.push_back(move);
12770  }
12771  }
12772  }
12773  }
12774 
12775  m_BlockInfos.clear();
12776 
12777  PostprocessMetadata();
12778 
12779  return VK_SUCCESS;
12780 }
12781 
12782 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12783 {
12784  const size_t blockCount = m_pBlockVector->GetBlockCount();
12785  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12786  {
12787  VmaBlockMetadata_Generic* const pMetadata =
12788  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12789  pMetadata->m_FreeCount = 0;
12790  pMetadata->m_SumFreeSize = pMetadata->GetSize();
12791  pMetadata->m_FreeSuballocationsBySize.clear();
12792  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12793  it != pMetadata->m_Suballocations.end(); )
12794  {
12795  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
12796  {
12797  VmaSuballocationList::iterator nextIt = it;
12798  ++nextIt;
12799  pMetadata->m_Suballocations.erase(it);
12800  it = nextIt;
12801  }
12802  else
12803  {
12804  ++it;
12805  }
12806  }
12807  }
12808 }
12809 
12810 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
12811 {
12812  const size_t blockCount = m_pBlockVector->GetBlockCount();
12813  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12814  {
12815  VmaBlockMetadata_Generic* const pMetadata =
12816  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12817  const VkDeviceSize blockSize = pMetadata->GetSize();
12818 
12819  // No allocations in this block - entire area is free.
12820  if(pMetadata->m_Suballocations.empty())
12821  {
12822  pMetadata->m_FreeCount = 1;
12823  //pMetadata->m_SumFreeSize is already set to blockSize.
12824  VmaSuballocation suballoc = {
12825  0, // offset
12826  blockSize, // size
12827  VMA_NULL, // hAllocation
12828  VMA_SUBALLOCATION_TYPE_FREE };
12829  pMetadata->m_Suballocations.push_back(suballoc);
12830  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
12831  }
12832  // There are some allocations in this block.
12833  else
12834  {
12835  VkDeviceSize offset = 0;
12836  VmaSuballocationList::iterator it;
12837  for(it = pMetadata->m_Suballocations.begin();
12838  it != pMetadata->m_Suballocations.end();
12839  ++it)
12840  {
12841  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
12842  VMA_ASSERT(it->offset >= offset);
12843 
12844  // Need to insert preceding free space.
12845  if(it->offset > offset)
12846  {
12847  ++pMetadata->m_FreeCount;
12848  const VkDeviceSize freeSize = it->offset - offset;
12849  VmaSuballocation suballoc = {
12850  offset, // offset
12851  freeSize, // size
12852  VMA_NULL, // hAllocation
12853  VMA_SUBALLOCATION_TYPE_FREE };
12854  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12855  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
12856  {
12857  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
12858  }
12859  }
12860 
12861  pMetadata->m_SumFreeSize -= it->size;
12862  offset = it->offset + it->size;
12863  }
12864 
12865  // Need to insert trailing free space.
12866  if(offset < blockSize)
12867  {
12868  ++pMetadata->m_FreeCount;
12869  const VkDeviceSize freeSize = blockSize - offset;
12870  VmaSuballocation suballoc = {
12871  offset, // offset
12872  freeSize, // size
12873  VMA_NULL, // hAllocation
12874  VMA_SUBALLOCATION_TYPE_FREE };
12875  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
12876  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12877  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) // Consistent with the preceding-free-space case above.
12878  {
12879  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
12880  }
12881  }
12882 
12883  VMA_SORT(
12884  pMetadata->m_FreeSuballocationsBySize.begin(),
12885  pMetadata->m_FreeSuballocationsBySize.end(),
12886  VmaSuballocationItemSizeLess());
12887  }
12888 
12889  VMA_HEAVY_ASSERT(pMetadata->Validate());
12890  }
12891 }
12892 
12893 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
12894 {
12895  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
12896  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12897  while(it != pMetadata->m_Suballocations.end())
12898  {
12899  // Stop at the first suballocation at or past the new offset.
12900  if(it->offset >= suballoc.offset)
12901  break;
12902  ++it;
12903  }
12904  pMetadata->m_Suballocations.insert(it, suballoc);
12905 }
12906 
12907 ////////////////////////////////////////////////////////////////////////////////
12908 // VmaBlockVectorDefragmentationContext
12909 
12910 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
12911  VmaAllocator hAllocator,
12912  VmaPool hCustomPool,
12913  VmaBlockVector* pBlockVector,
12914  uint32_t currFrameIndex,
12915  uint32_t algorithmFlags) :
12916  res(VK_SUCCESS),
12917  mutexLocked(false),
12918  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
12919  m_hAllocator(hAllocator),
12920  m_hCustomPool(hCustomPool),
12921  m_pBlockVector(pBlockVector),
12922  m_CurrFrameIndex(currFrameIndex),
12923  m_AlgorithmFlags(algorithmFlags),
12924  m_pAlgorithm(VMA_NULL),
12925  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
12926  m_AllAllocations(false)
12927 {
12928 }
12929 
12930 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
12931 {
12932  vma_delete(m_hAllocator, m_pAlgorithm);
12933 }
12934 
12935 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12936 {
12937  AllocInfo info = { hAlloc, pChanged };
12938  m_Allocations.push_back(info);
12939 }
12940 
12941 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
12942 {
12943  const bool allAllocations = m_AllAllocations ||
12944  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
12945 
12946  /********************************
12947  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
12948  ********************************/
12949 
12950  /*
12951  Fast algorithm is supported only when certain criteria are met:
12952  - VMA_DEBUG_MARGIN is 0.
12953  - All allocations in this block vector are moveable.
12954  - There is no possibility of image/buffer granularity conflict.
12955  */
12956  if(VMA_DEBUG_MARGIN == 0 &&
12957  allAllocations &&
12958  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
12959  {
12960  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
12961  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
12962  }
12963  else
12964  {
12965  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
12966  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
12967  }
12968 
12969  if(allAllocations)
12970  {
12971  m_pAlgorithm->AddAll();
12972  }
12973  else
12974  {
12975  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
12976  {
12977  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
12978  }
12979  }
12980 }
12981 
12982 ////////////////////////////////////////////////////////////////////////////////
12983 // VmaDefragmentationContext
12984 
12985 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
12986  VmaAllocator hAllocator,
12987  uint32_t currFrameIndex,
12988  uint32_t flags,
12989  VmaDefragmentationStats* pStats) :
12990  m_hAllocator(hAllocator),
12991  m_CurrFrameIndex(currFrameIndex),
12992  m_Flags(flags),
12993  m_pStats(pStats),
12994  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
12995 {
12996  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
12997 }
12998 
12999 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13000 {
13001  for(size_t i = m_CustomPoolContexts.size(); i--; )
13002  {
13003  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13004  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13005  vma_delete(m_hAllocator, pBlockVectorCtx);
13006  }
13007  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13008  {
13009  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13010  if(pBlockVectorCtx)
13011  {
13012  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13013  vma_delete(m_hAllocator, pBlockVectorCtx);
13014  }
13015  }
13016 }
13017 
13018 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13019 {
13020  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13021  {
13022  VmaPool pool = pPools[poolIndex];
13023  VMA_ASSERT(pool);
13024  // Pools with algorithm other than default are not defragmented.
13025  if(pool->m_BlockVector.GetAlgorithm() == 0)
13026  {
13027  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13028 
13029  for(size_t i = m_CustomPoolContexts.size(); i--; )
13030  {
13031  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13032  {
13033  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13034  break;
13035  }
13036  }
13037 
13038  if(!pBlockVectorDefragCtx)
13039  {
13040  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13041  m_hAllocator,
13042  pool,
13043  &pool->m_BlockVector,
13044  m_CurrFrameIndex,
13045  m_Flags);
13046  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13047  }
13048 
13049  pBlockVectorDefragCtx->AddAll();
13050  }
13051  }
13052 }
13053 
13054 void VmaDefragmentationContext_T::AddAllocations(
13055  uint32_t allocationCount,
13056  VmaAllocation* pAllocations,
13057  VkBool32* pAllocationsChanged)
13058 {
13059  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
13060  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13061  {
13062  const VmaAllocation hAlloc = pAllocations[allocIndex];
13063  VMA_ASSERT(hAlloc);
13064  // DedicatedAlloc cannot be defragmented.
13065  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13066  // Lost allocation cannot be defragmented.
13067  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13068  {
13069  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13070 
13071  const VmaPool hAllocPool = hAlloc->GetPool();
13072  // This allocation belongs to custom pool.
13073  if(hAllocPool != VK_NULL_HANDLE)
13074  {
13075  // Pools with algorithm other than default are not defragmented.
13076  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13077  {
13078  for(size_t i = m_CustomPoolContexts.size(); i--; )
13079  {
13080  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13081  {
13082  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13083  break;
13084  }
13085  }
13086  if(!pBlockVectorDefragCtx)
13087  {
13088  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13089  m_hAllocator,
13090  hAllocPool,
13091  &hAllocPool->m_BlockVector,
13092  m_CurrFrameIndex,
13093  m_Flags);
13094  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13095  }
13096  }
13097  }
13098  // This allocation belongs to default pool.
13099  else
13100  {
13101  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13102  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13103  if(!pBlockVectorDefragCtx)
13104  {
13105  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13106  m_hAllocator,
13107  VMA_NULL, // hCustomPool
13108  m_hAllocator->m_pBlockVectors[memTypeIndex],
13109  m_CurrFrameIndex,
13110  m_Flags);
13111  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13112  }
13113  }
13114 
13115  if(pBlockVectorDefragCtx)
13116  {
13117  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13118  &pAllocationsChanged[allocIndex] : VMA_NULL;
13119  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13120  }
13121  }
13122  }
13123 }
13124 
13125 VkResult VmaDefragmentationContext_T::Defragment(
13126  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13127  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13128  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13129 {
13130  if(pStats)
13131  {
13132  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13133  }
13134 
13135  if(commandBuffer == VK_NULL_HANDLE)
13136  {
13137  maxGpuBytesToMove = 0;
13138  maxGpuAllocationsToMove = 0;
13139  }
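 // Without a command buffer there is nothing to record transfer commands into,
 // so GPU defragmentation is effectively disabled by zeroing its limits above.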
13140 
13141  VkResult res = VK_SUCCESS;
13142 
13143  // Process default pools.
13144  for(uint32_t memTypeIndex = 0;
13145  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13146  ++memTypeIndex)
13147  {
13148  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13149  if(pBlockVectorCtx)
13150  {
13151  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13152  pBlockVectorCtx->GetBlockVector()->Defragment(
13153  pBlockVectorCtx,
13154  pStats,
13155  maxCpuBytesToMove, maxCpuAllocationsToMove,
13156  maxGpuBytesToMove, maxGpuAllocationsToMove,
13157  commandBuffer);
13158  if(pBlockVectorCtx->res != VK_SUCCESS)
13159  {
13160  res = pBlockVectorCtx->res;
13161  }
13162  }
13163  }
13164 
13165  // Process custom pools.
13166  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13167  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13168  ++customCtxIndex)
13169  {
13170  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13171  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13172  pBlockVectorCtx->GetBlockVector()->Defragment(
13173  pBlockVectorCtx,
13174  pStats,
13175  maxCpuBytesToMove, maxCpuAllocationsToMove,
13176  maxGpuBytesToMove, maxGpuAllocationsToMove,
13177  commandBuffer);
13178  if(pBlockVectorCtx->res != VK_SUCCESS)
13179  {
13180  res = pBlockVectorCtx->res;
13181  }
13182  }
13183 
13184  return res;
13185 }
13186 
13187 ////////////////////////////////////////////////////////////////////////////////
13188 // VmaRecorder
13189 
13190 #if VMA_RECORDING_ENABLED
13191 
13192 VmaRecorder::VmaRecorder() :
13193  m_UseMutex(true),
13194  m_Flags(0),
13195  m_File(VMA_NULL),
13196  m_Freq(INT64_MAX),
13197  m_StartCounter(INT64_MAX)
13198 {
13199 }
13200 
13201 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13202 {
13203  m_UseMutex = useMutex;
13204  m_Flags = settings.flags;
13205 
13206  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13207  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13208 
13209  // Open file for writing.
13210  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13211  if(err != 0)
13212  {
13213  return VK_ERROR_INITIALIZATION_FAILED;
13214  }
13215 
13216  // Write header.
13217  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13218  fprintf(m_File, "%s\n", "1,4");
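 // Each subsequent call is appended as one CSV line of the form
 // "threadId,time,frameIndex,functionName,args...", e.g. (hypothetical values):
 //     7256,0.104,1,vmaCreateAllocator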
13219 
13220  return VK_SUCCESS;
13221 }
13222 
13223 VmaRecorder::~VmaRecorder()
13224 {
13225  if(m_File != VMA_NULL)
13226  {
13227  fclose(m_File);
13228  }
13229 }
13230 
13231 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13232 {
13233  CallParams callParams;
13234  GetBasicParams(callParams);
13235 
13236  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13237  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13238  Flush();
13239 }
13240 
13241 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13242 {
13243  CallParams callParams;
13244  GetBasicParams(callParams);
13245 
13246  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13247  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13248  Flush();
13249 }
13250 
13251 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13252 {
13253  CallParams callParams;
13254  GetBasicParams(callParams);
13255 
13256  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13257  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13258  createInfo.memoryTypeIndex,
13259  createInfo.flags,
13260  createInfo.blockSize,
13261  (uint64_t)createInfo.minBlockCount,
13262  (uint64_t)createInfo.maxBlockCount,
13263  createInfo.frameInUseCount,
13264  pool);
13265  Flush();
13266 }
13267 
13268 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13269 {
13270  CallParams callParams;
13271  GetBasicParams(callParams);
13272 
13273  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13274  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13275  pool);
13276  Flush();
13277 }
13278 
13279 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13280  const VkMemoryRequirements& vkMemReq,
13281  const VmaAllocationCreateInfo& createInfo,
13282  VmaAllocation allocation)
13283 {
13284  CallParams callParams;
13285  GetBasicParams(callParams);
13286 
13287  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13288  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13289  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13290  vkMemReq.size,
13291  vkMemReq.alignment,
13292  vkMemReq.memoryTypeBits,
13293  createInfo.flags,
13294  createInfo.usage,
13295  createInfo.requiredFlags,
13296  createInfo.preferredFlags,
13297  createInfo.memoryTypeBits,
13298  createInfo.pool,
13299  allocation,
13300  userDataStr.GetString());
13301  Flush();
13302 }
13303 
13304 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13305  const VkMemoryRequirements& vkMemReq,
13306  bool requiresDedicatedAllocation,
13307  bool prefersDedicatedAllocation,
13308  const VmaAllocationCreateInfo& createInfo,
13309  VmaAllocation allocation)
13310 {
13311  CallParams callParams;
13312  GetBasicParams(callParams);
13313 
13314  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13315  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13316  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13317  vkMemReq.size,
13318  vkMemReq.alignment,
13319  vkMemReq.memoryTypeBits,
13320  requiresDedicatedAllocation ? 1 : 0,
13321  prefersDedicatedAllocation ? 1 : 0,
13322  createInfo.flags,
13323  createInfo.usage,
13324  createInfo.requiredFlags,
13325  createInfo.preferredFlags,
13326  createInfo.memoryTypeBits,
13327  createInfo.pool,
13328  allocation,
13329  userDataStr.GetString());
13330  Flush();
13331 }
13332 
13333 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13334  const VkMemoryRequirements& vkMemReq,
13335  bool requiresDedicatedAllocation,
13336  bool prefersDedicatedAllocation,
13337  const VmaAllocationCreateInfo& createInfo,
13338  VmaAllocation allocation)
13339 {
13340  CallParams callParams;
13341  GetBasicParams(callParams);
13342 
13343  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13344  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13345  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13346  vkMemReq.size,
13347  vkMemReq.alignment,
13348  vkMemReq.memoryTypeBits,
13349  requiresDedicatedAllocation ? 1 : 0,
13350  prefersDedicatedAllocation ? 1 : 0,
13351  createInfo.flags,
13352  createInfo.usage,
13353  createInfo.requiredFlags,
13354  createInfo.preferredFlags,
13355  createInfo.memoryTypeBits,
13356  createInfo.pool,
13357  allocation,
13358  userDataStr.GetString());
13359  Flush();
13360 }
13361 
13362 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13363  VmaAllocation allocation)
13364 {
13365  CallParams callParams;
13366  GetBasicParams(callParams);
13367 
13368  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13369  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13370  allocation);
13371  Flush();
13372 }
13373 
13374 void VmaRecorder::RecordResizeAllocation(
13375  uint32_t frameIndex,
13376  VmaAllocation allocation,
13377  VkDeviceSize newSize)
13378 {
13379  CallParams callParams;
13380  GetBasicParams(callParams);
13381 
13382  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13383  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13384  allocation, newSize);
13385  Flush();
13386 }
13387 
13388 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13389  VmaAllocation allocation,
13390  const void* pUserData)
13391 {
13392  CallParams callParams;
13393  GetBasicParams(callParams);
13394 
13395  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13396  UserDataString userDataStr(
13397  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13398  pUserData);
13399  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13400  allocation,
13401  userDataStr.GetString());
13402  Flush();
13403 }
13404 
13405 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13406  VmaAllocation allocation)
13407 {
13408  CallParams callParams;
13409  GetBasicParams(callParams);
13410 
13411  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13412  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13413  allocation);
13414  Flush();
13415 }
13416 
13417 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13418  VmaAllocation allocation)
13419 {
13420  CallParams callParams;
13421  GetBasicParams(callParams);
13422 
13423  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13424  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13425  allocation);
13426  Flush();
13427 }
13428 
13429 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13430  VmaAllocation allocation)
13431 {
13432  CallParams callParams;
13433  GetBasicParams(callParams);
13434 
13435  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13436  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13437  allocation);
13438  Flush();
13439 }
13440 
13441 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13442  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13443 {
13444  CallParams callParams;
13445  GetBasicParams(callParams);
13446 
13447  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13448  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13449  allocation,
13450  offset,
13451  size);
13452  Flush();
13453 }
13454 
13455 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13456  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13457 {
13458  CallParams callParams;
13459  GetBasicParams(callParams);
13460 
13461  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13462  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13463  allocation,
13464  offset,
13465  size);
13466  Flush();
13467 }
13468 
13469 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13470  const VkBufferCreateInfo& bufCreateInfo,
13471  const VmaAllocationCreateInfo& allocCreateInfo,
13472  VmaAllocation allocation)
13473 {
13474  CallParams callParams;
13475  GetBasicParams(callParams);
13476 
13477  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13478  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13479  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13480  bufCreateInfo.flags,
13481  bufCreateInfo.size,
13482  bufCreateInfo.usage,
13483  bufCreateInfo.sharingMode,
13484  allocCreateInfo.flags,
13485  allocCreateInfo.usage,
13486  allocCreateInfo.requiredFlags,
13487  allocCreateInfo.preferredFlags,
13488  allocCreateInfo.memoryTypeBits,
13489  allocCreateInfo.pool,
13490  allocation,
13491  userDataStr.GetString());
13492  Flush();
13493 }
13494 
13495 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13496  const VkImageCreateInfo& imageCreateInfo,
13497  const VmaAllocationCreateInfo& allocCreateInfo,
13498  VmaAllocation allocation)
13499 {
13500  CallParams callParams;
13501  GetBasicParams(callParams);
13502 
13503  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13504  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13505  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13506  imageCreateInfo.flags,
13507  imageCreateInfo.imageType,
13508  imageCreateInfo.format,
13509  imageCreateInfo.extent.width,
13510  imageCreateInfo.extent.height,
13511  imageCreateInfo.extent.depth,
13512  imageCreateInfo.mipLevels,
13513  imageCreateInfo.arrayLayers,
13514  imageCreateInfo.samples,
13515  imageCreateInfo.tiling,
13516  imageCreateInfo.usage,
13517  imageCreateInfo.sharingMode,
13518  imageCreateInfo.initialLayout,
13519  allocCreateInfo.flags,
13520  allocCreateInfo.usage,
13521  allocCreateInfo.requiredFlags,
13522  allocCreateInfo.preferredFlags,
13523  allocCreateInfo.memoryTypeBits,
13524  allocCreateInfo.pool,
13525  allocation,
13526  userDataStr.GetString());
13527  Flush();
13528 }
13529 
13530 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13531  VmaAllocation allocation)
13532 {
13533  CallParams callParams;
13534  GetBasicParams(callParams);
13535 
13536  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13537  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13538  allocation);
13539  Flush();
13540 }
13541 
13542 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13543  VmaAllocation allocation)
13544 {
13545  CallParams callParams;
13546  GetBasicParams(callParams);
13547 
13548  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13549  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13550  allocation);
13551  Flush();
13552 }
13553 
13554 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13555  VmaAllocation allocation)
13556 {
13557  CallParams callParams;
13558  GetBasicParams(callParams);
13559 
13560  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13561  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13562  allocation);
13563  Flush();
13564 }
13565 
13566 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13567  VmaAllocation allocation)
13568 {
13569  CallParams callParams;
13570  GetBasicParams(callParams);
13571 
13572  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13573  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13574  allocation);
13575  Flush();
13576 }
13577 
13578 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13579  VmaPool pool)
13580 {
13581  CallParams callParams;
13582  GetBasicParams(callParams);
13583 
13584  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13585  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13586  pool);
13587  Flush();
13588 }
13589 
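// Formats pUserData for the CSV output: with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT it is written verbatim as a
// null-terminated string, otherwise only its pointer value is printed.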
13590 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13591 {
13592  if(pUserData != VMA_NULL)
13593  {
13594  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13595  {
13596  m_Str = (const char*)pUserData;
13597  }
13598  else
13599  {
13600  sprintf_s(m_PtrStr, "%p", pUserData);
13601  m_Str = m_PtrStr;
13602  }
13603  }
13604  else
13605  {
13606  m_Str = "";
13607  }
13608 }
13609 
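// Writes a "Config" section capturing the physical device, its memory heaps
// and types, and the VMA_* debug macros in effect, so a recording carries a
// description of the environment it was made in.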
13610 void VmaRecorder::WriteConfiguration(
13611  const VkPhysicalDeviceProperties& devProps,
13612  const VkPhysicalDeviceMemoryProperties& memProps,
13613  bool dedicatedAllocationExtensionEnabled)
13614 {
13615  fprintf(m_File, "Config,Begin\n");
13616 
13617  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
13618  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
13619  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
13620  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
13621  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
13622  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
13623 
13624  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
13625  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
13626  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
13627 
13628  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
13629  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
13630  {
13631  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
13632  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
13633  }
13634  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
13635  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
13636  {
13637  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
13638  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
13639  }
13640 
13641  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
13642 
13643  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
13644  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
13645  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
13646  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
13647  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
13648  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
13649  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
13650  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
13651  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13652 
13653  fprintf(m_File, "Config,End\n");
13654 }
13655 
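// Timestamps are seconds elapsed since Init(), derived from
// QueryPerformanceCounter relative to m_StartCounter and scaled by m_Freq.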
13656 void VmaRecorder::GetBasicParams(CallParams& outParams)
13657 {
13658  outParams.threadId = GetCurrentThreadId();
13659 
13660  LARGE_INTEGER counter;
13661  QueryPerformanceCounter(&counter);
13662  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13663 }
13664 
13665 void VmaRecorder::Flush()
13666 {
13667  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13668  {
13669  fflush(m_File);
13670  }
13671 }
13672 
13673 #endif // #if VMA_RECORDING_ENABLED
13674 
13675 ////////////////////////////////////////////////////////////////////////////////
13676 // VmaAllocator_T
13677 
13678 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13679  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13680  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13681  m_hDevice(pCreateInfo->device),
13682  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13683  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13684  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13685  m_PreferredLargeHeapBlockSize(0),
13686  m_PhysicalDevice(pCreateInfo->physicalDevice),
13687  m_CurrentFrameIndex(0),
13688  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13689  m_NextPoolId(0)
13690 #if VMA_RECORDING_ENABLED
13691  ,m_pRecorder(VMA_NULL)
13692 #endif
13693 {
13694  if(VMA_DEBUG_DETECT_CORRUPTION)
13695  {
13696  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13697  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13698  }
13699 
13700  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13701 
13702 #if !(VMA_DEDICATED_ALLOCATION)
13703  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
13704  {
13705  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
13706  }
13707 #endif
13708 
13709  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
13710  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
13711  memset(&m_MemProps, 0, sizeof(m_MemProps));
13712 
13713  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
13714  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
13715 
13716  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
13717  {
13718  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
13719  }
13720 
13721  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
13722  {
13723  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
13724  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
13725  }
13726 
13727  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
13728 
13729  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
13730  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
13731 
13732  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
13733  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
13734  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
13735  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
13736 
13737  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
13738  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13739 
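// A user-imposed heap size limit also shrinks the heap size reported in
// m_MemProps, so preferred block sizes are calculated against the budget
// rather than the physical heap.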
13740  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
13741  {
13742  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
13743  {
13744  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
13745  if(limit != VK_WHOLE_SIZE)
13746  {
13747  m_HeapSizeLimit[heapIndex] = limit;
13748  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
13749  {
13750  m_MemProps.memoryHeaps[heapIndex].size = limit;
13751  }
13752  }
13753  }
13754  }
13755 
13756  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13757  {
13758  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
13759 
13760  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
13761  this,
13762  memTypeIndex,
13763  preferredBlockSize,
13764  0,
13765  SIZE_MAX,
13766  GetBufferImageGranularity(),
13767  pCreateInfo->frameInUseCount,
13768  false, // isCustomPool
13769  false, // explicitBlockSize
13770  false); // linearAlgorithm
13771  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
13772  // because minBlockCount is 0.
13773  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
13774 
13775  }
13776 }
13777 
13778 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
13779 {
13780  VkResult res = VK_SUCCESS;
13781 
13782  if(pCreateInfo->pRecordSettings != VMA_NULL &&
13783  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
13784  {
13785 #if VMA_RECORDING_ENABLED
13786  m_pRecorder = vma_new(this, VmaRecorder)();
13787  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
13788  if(res != VK_SUCCESS)
13789  {
13790  return res;
13791  }
13792  m_pRecorder->WriteConfiguration(
13793  m_PhysicalDeviceProperties,
13794  m_MemProps,
13795  m_UseKhrDedicatedAllocation);
13796  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
13797 #else
13798  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but recording is not supported because VMA_RECORDING_ENABLED is not defined to 1.");
13799  return VK_ERROR_FEATURE_NOT_PRESENT;
13800 #endif
13801  }
13802 
13803  return res;
13804 }
13805 
13806 VmaAllocator_T::~VmaAllocator_T()
13807 {
13808 #if VMA_RECORDING_ENABLED
13809  if(m_pRecorder != VMA_NULL)
13810  {
13811  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
13812  vma_delete(this, m_pRecorder);
13813  }
13814 #endif
13815 
13816  VMA_ASSERT(m_Pools.empty());
13817 
13818  for(size_t i = GetMemoryTypeCount(); i--; )
13819  {
13820  vma_delete(this, m_pDedicatedAllocations[i]);
13821  vma_delete(this, m_pBlockVectors[i]);
13822  }
13823 }
13824 
13825 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
13826 {
13827 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
13828  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
13829  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
13830  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
13831  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
13832  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
13833  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
13834  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
13835  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
13836  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
13837  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
13838  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
13839  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
13840  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
13841  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
13842  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
13843  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
13844  m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
13845 #if VMA_DEDICATED_ALLOCATION
13846  if(m_UseKhrDedicatedAllocation)
13847  {
13848  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
13849  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
13850  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
13851  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
13852  }
13853 #endif // #if VMA_DEDICATED_ALLOCATION
13854 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
13855 
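// Pointers supplied through VmaVulkanFunctions override anything imported
// statically above; each one is copied only if non-null.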
13856 #define VMA_COPY_IF_NOT_NULL(funcName) \
13857  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
13858 
13859  if(pVulkanFunctions != VMA_NULL)
13860  {
13861  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
13862  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
13863  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
13864  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
13865  VMA_COPY_IF_NOT_NULL(vkMapMemory);
13866  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
13867  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
13868  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
13869  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
13870  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
13871  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
13872  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
13873  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
13874  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
13875  VMA_COPY_IF_NOT_NULL(vkCreateImage);
13876  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
13877  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
13878 #if VMA_DEDICATED_ALLOCATION
13879  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
13880  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
13881 #endif
13882  }
13883 
13884 #undef VMA_COPY_IF_NOT_NULL
13885 
13886  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
13887  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
13888  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
13889  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
13890  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
13891  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
13892  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
13893  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
13894  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
13895  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
13896  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
13897  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
13898  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
13899  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
13900  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
13901  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
13902  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
13903  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
13904  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
13905 #if VMA_DEDICATED_ALLOCATION
13906  if(m_UseKhrDedicatedAllocation)
13907  {
13908  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
13909  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
13910  }
13911 #endif
13912 }
13913 
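// Heaps of at most VMA_SMALL_HEAP_MAX_SIZE get blocks of 1/8 of the heap size;
// larger heaps use the preferred large-heap block size chosen at creation.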
13914 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
13915 {
13916  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
13917  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
13918  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
13919  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
13920 }
13921 
13922 VkResult VmaAllocator_T::AllocateMemoryOfType(
13923  VkDeviceSize size,
13924  VkDeviceSize alignment,
13925  bool dedicatedAllocation,
13926  VkBuffer dedicatedBuffer,
13927  VkImage dedicatedImage,
13928  const VmaAllocationCreateInfo& createInfo,
13929  uint32_t memTypeIndex,
13930  VmaSuballocationType suballocType,
13931  VmaAllocation* pAllocation)
13932 {
13933  VMA_ASSERT(pAllocation != VMA_NULL);
13934  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
13935 
13936  VmaAllocationCreateInfo finalCreateInfo = createInfo;
13937 
13938  // If memory type is not HOST_VISIBLE, disable MAPPED.
13939  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
13940  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13941  {
13942  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
13943  }
13944 
13945  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
13946  VMA_ASSERT(blockVector);
13947 
13948  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
13949  bool preferDedicatedMemory =
13950  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
13951  dedicatedAllocation ||
13952  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
13953  size > preferredBlockSize / 2;
13954 
13955  if(preferDedicatedMemory &&
13956  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
13957  finalCreateInfo.pool == VK_NULL_HANDLE)
13958  {
13959  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
13960  }
13961 
13962  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
13963  {
13964  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
13965  {
13966  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13967  }
13968  else
13969  {
13970  return AllocateDedicatedMemory(
13971  size,
13972  suballocType,
13973  memTypeIndex,
13974  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
13975  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
13976  finalCreateInfo.pUserData,
13977  dedicatedBuffer,
13978  dedicatedImage,
13979  pAllocation);
13980  }
13981  }
13982  else
13983  {
13984  VkResult res = blockVector->Allocate(
13985  VK_NULL_HANDLE, // hCurrentPool
13986  m_CurrentFrameIndex.load(),
13987  size,
13988  alignment,
13989  finalCreateInfo,
13990  suballocType,
13991  pAllocation);
13992  if(res == VK_SUCCESS)
13993  {
13994  return res;
13995  }
13996 
13997  // Block allocation failed. Try dedicated memory.
13998  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
13999  {
14000  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14001  }
14002  else
14003  {
14004  res = AllocateDedicatedMemory(
14005  size,
14006  suballocType,
14007  memTypeIndex,
14008  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14009  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14010  finalCreateInfo.pUserData,
14011  dedicatedBuffer,
14012  dedicatedImage,
14013  pAllocation);
14014  if(res == VK_SUCCESS)
14015  {
14016  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
14017  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14018  return VK_SUCCESS;
14019  }
14020  else
14021  {
14022  // Everything failed: Return error code.
14023  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14024  return res;
14025  }
14026  }
14027  }
14028 }
14029 
14030 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14031  VkDeviceSize size,
14032  VmaSuballocationType suballocType,
14033  uint32_t memTypeIndex,
14034  bool map,
14035  bool isUserDataString,
14036  void* pUserData,
14037  VkBuffer dedicatedBuffer,
14038  VkImage dedicatedImage,
14039  VmaAllocation* pAllocation)
14040 {
14041  VMA_ASSERT(pAllocation);
14042 
14043  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14044  allocInfo.memoryTypeIndex = memTypeIndex;
14045  allocInfo.allocationSize = size;
14046 
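// When VK_KHR_dedicated_allocation is in use, chain the buffer or image into
// the allocation through VkMemoryDedicatedAllocateInfoKHR so the driver can
// apply dedicated-memory optimizations.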
14047 #if VMA_DEDICATED_ALLOCATION
14048  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14049  if(m_UseKhrDedicatedAllocation)
14050  {
14051  if(dedicatedBuffer != VK_NULL_HANDLE)
14052  {
14053  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14054  dedicatedAllocInfo.buffer = dedicatedBuffer;
14055  allocInfo.pNext = &dedicatedAllocInfo;
14056  }
14057  else if(dedicatedImage != VK_NULL_HANDLE)
14058  {
14059  dedicatedAllocInfo.image = dedicatedImage;
14060  allocInfo.pNext = &dedicatedAllocInfo;
14061  }
14062  }
14063 #endif // #if VMA_DEDICATED_ALLOCATION
14064 
14065  // Allocate VkDeviceMemory.
14066  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14067  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14068  if(res < 0)
14069  {
14070  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14071  return res;
14072  }
14073 
14074  void* pMappedData = VMA_NULL;
14075  if(map)
14076  {
14077  res = (*m_VulkanFunctions.vkMapMemory)(
14078  m_hDevice,
14079  hMemory,
14080  0,
14081  VK_WHOLE_SIZE,
14082  0,
14083  &pMappedData);
14084  if(res < 0)
14085  {
14086  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14087  FreeVulkanMemory(memTypeIndex, size, hMemory);
14088  return res;
14089  }
14090  }
14091 
14092  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
14093  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14094  (*pAllocation)->SetUserData(this, pUserData);
14095  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14096  {
14097  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14098  }
14099 
14100  // Register it in m_pDedicatedAllocations.
14101  {
14102  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14103  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14104  VMA_ASSERT(pDedicatedAllocations);
14105  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
14106  }
14107 
14108  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
14109 
14110  return VK_SUCCESS;
14111 }
14112 
14113 void VmaAllocator_T::GetBufferMemoryRequirements(
14114  VkBuffer hBuffer,
14115  VkMemoryRequirements& memReq,
14116  bool& requiresDedicatedAllocation,
14117  bool& prefersDedicatedAllocation) const
14118 {
14119 #if VMA_DEDICATED_ALLOCATION
14120  if(m_UseKhrDedicatedAllocation)
14121  {
14122  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14123  memReqInfo.buffer = hBuffer;
14124 
14125  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14126 
14127  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14128  memReq2.pNext = &memDedicatedReq;
14129 
14130  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14131 
14132  memReq = memReq2.memoryRequirements;
14133  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14134  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14135  }
14136  else
14137 #endif // #if VMA_DEDICATED_ALLOCATION
14138  {
14139  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14140  requiresDedicatedAllocation = false;
14141  prefersDedicatedAllocation = false;
14142  }
14143 }
14144 
14145 void VmaAllocator_T::GetImageMemoryRequirements(
14146  VkImage hImage,
14147  VkMemoryRequirements& memReq,
14148  bool& requiresDedicatedAllocation,
14149  bool& prefersDedicatedAllocation) const
14150 {
14151 #if VMA_DEDICATED_ALLOCATION
14152  if(m_UseKhrDedicatedAllocation)
14153  {
14154  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14155  memReqInfo.image = hImage;
14156 
14157  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14158 
14159  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14160  memReq2.pNext = &memDedicatedReq;
14161 
14162  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14163 
14164  memReq = memReq2.memoryRequirements;
14165  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14166  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14167  }
14168  else
14169 #endif // #if VMA_DEDICATED_ALLOCATION
14170  {
14171  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14172  requiresDedicatedAllocation = false;
14173  prefersDedicatedAllocation = false;
14174  }
14175 }
14176 
14177 VkResult VmaAllocator_T::AllocateMemory(
14178  const VkMemoryRequirements& vkMemReq,
14179  bool requiresDedicatedAllocation,
14180  bool prefersDedicatedAllocation,
14181  VkBuffer dedicatedBuffer,
14182  VkImage dedicatedImage,
14183  const VmaAllocationCreateInfo& createInfo,
14184  VmaSuballocationType suballocType,
14185  VmaAllocation* pAllocation)
14186 {
14187  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14188 
14189  if(vkMemReq.size == 0)
14190  {
14191  return VK_ERROR_VALIDATION_FAILED_EXT;
14192  }
14193  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14194  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14195  {
14196  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14197  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14198  }
14199  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14200  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14201  {
14202  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14203  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14204  }
14205  if(requiresDedicatedAllocation)
14206  {
14207  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14208  {
14209  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14210  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14211  }
14212  if(createInfo.pool != VK_NULL_HANDLE)
14213  {
14214  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14215  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14216  }
14217  }
14218  if((createInfo.pool != VK_NULL_HANDLE) &&
14219  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14220  {
14221  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14222  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14223  }
14224 
14225  if(createInfo.pool != VK_NULL_HANDLE)
14226  {
14227  const VkDeviceSize alignmentForPool = VMA_MAX(
14228  vkMemReq.alignment,
14229  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14230  return createInfo.pool->m_BlockVector.Allocate(
14231  createInfo.pool,
14232  m_CurrentFrameIndex.load(),
14233  vkMemReq.size,
14234  alignmentForPool,
14235  createInfo,
14236  suballocType,
14237  pAllocation);
14238  }
14239  else
14240  {
14241  // Bit mask of Vulkan memory types acceptable for this allocation.
14242  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14243  uint32_t memTypeIndex = UINT32_MAX;
14244  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14245  if(res == VK_SUCCESS)
14246  {
14247  VkDeviceSize alignmentForMemType = VMA_MAX(
14248  vkMemReq.alignment,
14249  GetMemoryTypeMinAlignment(memTypeIndex));
14250 
14251  res = AllocateMemoryOfType(
14252  vkMemReq.size,
14253  alignmentForMemType,
14254  requiresDedicatedAllocation || prefersDedicatedAllocation,
14255  dedicatedBuffer,
14256  dedicatedImage,
14257  createInfo,
14258  memTypeIndex,
14259  suballocType,
14260  pAllocation);
14261  // Succeeded on first try.
14262  if(res == VK_SUCCESS)
14263  {
14264  return res;
14265  }
14266  // Allocation from this memory type failed. Try other compatible memory types.
14267  else
14268  {
14269  for(;;)
14270  {
14271  // Remove old memTypeIndex from list of possibilities.
14272  memoryTypeBits &= ~(1u << memTypeIndex);
14273  // Find alternative memTypeIndex.
14274  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14275  if(res == VK_SUCCESS)
14276  {
14277  alignmentForMemType = VMA_MAX(
14278  vkMemReq.alignment,
14279  GetMemoryTypeMinAlignment(memTypeIndex));
14280 
14281  res = AllocateMemoryOfType(
14282  vkMemReq.size,
14283  alignmentForMemType,
14284  requiresDedicatedAllocation || prefersDedicatedAllocation,
14285  dedicatedBuffer,
14286  dedicatedImage,
14287  createInfo,
14288  memTypeIndex,
14289  suballocType,
14290  pAllocation);
14291  // Allocation from this alternative memory type succeeded.
14292  if(res == VK_SUCCESS)
14293  {
14294  return res;
14295  }
14296  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14297  }
14298  // No other matching memory type index could be found.
14299  else
14300  {
14301  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14302  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14303  }
14304  }
14305  }
14306  }
14307  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14308  else
14309  return res;
14310  }
14311 }
14312 
14313 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
14314 {
14315  VMA_ASSERT(allocation);
14316 
14317  if(TouchAllocation(allocation))
14318  {
14319  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14320  {
14321  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14322  }
14323 
14324  switch(allocation->GetType())
14325  {
14326  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14327  {
14328  VmaBlockVector* pBlockVector = VMA_NULL;
14329  VmaPool hPool = allocation->GetPool();
14330  if(hPool != VK_NULL_HANDLE)
14331  {
14332  pBlockVector = &hPool->m_BlockVector;
14333  }
14334  else
14335  {
14336  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14337  pBlockVector = m_pBlockVectors[memTypeIndex];
14338  }
14339  pBlockVector->Free(allocation);
14340  }
14341  break;
14342  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14343  FreeDedicatedMemory(allocation);
14344  break;
14345  default:
14346  VMA_ASSERT(0);
14347  }
14348  }
14349 
14350  allocation->SetUserData(this, VMA_NULL);
14351  vma_delete(this, allocation);
14352 }
14353 
14354 VkResult VmaAllocator_T::ResizeAllocation(
14355  const VmaAllocation alloc,
14356  VkDeviceSize newSize)
14357 {
14358  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14359  {
14360  return VK_ERROR_VALIDATION_FAILED_EXT;
14361  }
14362  if(newSize == alloc->GetSize())
14363  {
14364  return VK_SUCCESS;
14365  }
14366 
14367  switch(alloc->GetType())
14368  {
14369  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14370  return VK_ERROR_FEATURE_NOT_PRESENT;
14371  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14372  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14373  {
14374  alloc->ChangeSize(newSize);
14375  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14376  return VK_SUCCESS;
14377  }
14378  else
14379  {
14380  return VK_ERROR_OUT_OF_POOL_MEMORY;
14381  }
14382  default:
14383  VMA_ASSERT(0);
14384  return VK_ERROR_VALIDATION_FAILED_EXT;
14385  }
14386 }
14387 
14388 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14389 {
14390  // Initialize.
14391  InitStatInfo(pStats->total);
14392  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14393  InitStatInfo(pStats->memoryType[i]);
14394  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14395  InitStatInfo(pStats->memoryHeap[i]);
14396 
14397  // Process default pools.
14398  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14399  {
14400  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14401  VMA_ASSERT(pBlockVector);
14402  pBlockVector->AddStats(pStats);
14403  }
14404 
14405  // Process custom pools.
14406  {
14407  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14408  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14409  {
14410  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14411  }
14412  }
14413 
14414  // Process dedicated allocations.
14415  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14416  {
14417  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14418  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14419  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14420  VMA_ASSERT(pDedicatedAllocVector);
14421  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14422  {
14423  VmaStatInfo allocationStatInfo;
14424  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14425  VmaAddStatInfo(pStats->total, allocationStatInfo);
14426  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14427  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14428  }
14429  }
14430 
14431  // Postprocess.
14432  VmaPostprocessCalcStatInfo(pStats->total);
14433  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14434  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14435  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14436  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14437 }
14438 
14439 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14440 
14441 VkResult VmaAllocator_T::DefragmentationBegin(
14442  const VmaDefragmentationInfo2& info,
14443  VmaDefragmentationStats* pStats,
14444  VmaDefragmentationContext* pContext)
14445 {
14446  if(info.pAllocationsChanged != VMA_NULL)
14447  {
14448  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14449  }
14450 
14451  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14452  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14453 
14454  (*pContext)->AddPools(info.poolCount, info.pPools);
14455  (*pContext)->AddAllocations(
14456  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
14457 
14458  VkResult res = (*pContext)->Defragment(
14459  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
14460  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
14461  info.commandBuffer, pStats);
14462 
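// VK_NOT_READY means GPU-side moves were recorded into info.commandBuffer and
// the context must live until DefragmentationEnd; any other result finishes
// (or fails) the defragmentation immediately, so the context is destroyed here.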
14463  if(res != VK_NOT_READY)
14464  {
14465  vma_delete(this, *pContext);
14466  *pContext = VMA_NULL;
14467  }
14468 
14469  return res;
14470 }
14471 
14472 VkResult VmaAllocator_T::DefragmentationEnd(
14473  VmaDefragmentationContext context)
14474 {
14475  vma_delete(this, context);
14476  return VK_SUCCESS;
14477 }
14478 
14479 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14480 {
14481  if(hAllocation->CanBecomeLost())
14482  {
14483  /*
14484  Warning: This is a carefully designed algorithm.
14485  Do not modify unless you really know what you're doing :)
14486  */
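// Spin on a compare-exchange that bumps the allocation's last-use frame index
// to the current frame, returning "lost" values if another thread marked the
// allocation lost in the meantime.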
14487  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14488  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14489  for(;;)
14490  {
14491  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14492  {
14493  pAllocationInfo->memoryType = UINT32_MAX;
14494  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14495  pAllocationInfo->offset = 0;
14496  pAllocationInfo->size = hAllocation->GetSize();
14497  pAllocationInfo->pMappedData = VMA_NULL;
14498  pAllocationInfo->pUserData = hAllocation->GetUserData();
14499  return;
14500  }
14501  else if(localLastUseFrameIndex == localCurrFrameIndex)
14502  {
14503  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14504  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14505  pAllocationInfo->offset = hAllocation->GetOffset();
14506  pAllocationInfo->size = hAllocation->GetSize();
14507  pAllocationInfo->pMappedData = VMA_NULL;
14508  pAllocationInfo->pUserData = hAllocation->GetUserData();
14509  return;
14510  }
14511  else // Last use time earlier than current time.
14512  {
14513  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14514  {
14515  localLastUseFrameIndex = localCurrFrameIndex;
14516  }
14517  }
14518  }
14519  }
14520  else
14521  {
14522 #if VMA_STATS_STRING_ENABLED
14523  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14524  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14525  for(;;)
14526  {
14527  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14528  if(localLastUseFrameIndex == localCurrFrameIndex)
14529  {
14530  break;
14531  }
14532  else // Last use time earlier than current time.
14533  {
14534  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14535  {
14536  localLastUseFrameIndex = localCurrFrameIndex;
14537  }
14538  }
14539  }
14540 #endif
14541 
14542  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14543  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14544  pAllocationInfo->offset = hAllocation->GetOffset();
14545  pAllocationInfo->size = hAllocation->GetSize();
14546  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14547  pAllocationInfo->pUserData = hAllocation->GetUserData();
14548  }
14549 }
14550 
14551 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14552 {
14553  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14554  if(hAllocation->CanBecomeLost())
14555  {
14556  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14557  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14558  for(;;)
14559  {
14560  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14561  {
14562  return false;
14563  }
14564  else if(localLastUseFrameIndex == localCurrFrameIndex)
14565  {
14566  return true;
14567  }
14568  else // Last use time earlier than current time.
14569  {
14570  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14571  {
14572  localLastUseFrameIndex = localCurrFrameIndex;
14573  }
14574  }
14575  }
14576  }
14577  else
14578  {
14579 #if VMA_STATS_STRING_ENABLED
14580  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14581  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14582  for(;;)
14583  {
14584  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14585  if(localLastUseFrameIndex == localCurrFrameIndex)
14586  {
14587  break;
14588  }
14589  else // Last use time earlier than current time.
14590  {
14591  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14592  {
14593  localLastUseFrameIndex = localCurrFrameIndex;
14594  }
14595  }
14596  }
14597 #endif
14598 
14599  return true;
14600  }
14601 }
14602 
14603 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14604 {
14605  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14606 
14607  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14608 
14609  if(newCreateInfo.maxBlockCount == 0)
14610  {
14611  newCreateInfo.maxBlockCount = SIZE_MAX;
14612  }
14613  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14614  {
14615  return VK_ERROR_INITIALIZATION_FAILED;
14616  }
14617 
14618  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14619 
14620  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14621 
14622  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14623  if(res != VK_SUCCESS)
14624  {
14625  vma_delete(this, *pPool);
14626  *pPool = VMA_NULL;
14627  return res;
14628  }
14629 
14630  // Add to m_Pools.
14631  {
14632  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14633  (*pPool)->SetId(m_NextPoolId++);
14634  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
14635  }
14636 
14637  return VK_SUCCESS;
14638 }
14639 
14640 void VmaAllocator_T::DestroyPool(VmaPool pool)
14641 {
14642  // Remove from m_Pools.
14643  {
14644  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14645  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
14646  VMA_ASSERT(success && "Pool not found in Allocator.");
14647  }
14648 
14649  vma_delete(this, pool);
14650 }
14651 
14652 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
14653 {
14654  pool->m_BlockVector.GetPoolStats(pPoolStats);
14655 }
14656 
14657 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
14658 {
14659  m_CurrentFrameIndex.store(frameIndex);
14660 }
14661 
14662 void VmaAllocator_T::MakePoolAllocationsLost(
14663  VmaPool hPool,
14664  size_t* pLostAllocationCount)
14665 {
14666  hPool->m_BlockVector.MakePoolAllocationsLost(
14667  m_CurrentFrameIndex.load(),
14668  pLostAllocationCount);
14669 }
14670 
14671 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
14672 {
14673  return hPool->m_BlockVector.CheckCorruption();
14674 }
14675 
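// Returns VK_SUCCESS if at least one block vector supported margin validation
// and none reported corruption, VK_ERROR_FEATURE_NOT_PRESENT if the check was
// unavailable everywhere, or the first hard error otherwise.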
14676 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
14677 {
14678  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
14679 
14680  // Process default pools.
14681  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14682  {
14683  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
14684  {
14685  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14686  VMA_ASSERT(pBlockVector);
14687  VkResult localRes = pBlockVector->CheckCorruption();
14688  switch(localRes)
14689  {
14690  case VK_ERROR_FEATURE_NOT_PRESENT:
14691  break;
14692  case VK_SUCCESS:
14693  finalRes = VK_SUCCESS;
14694  break;
14695  default:
14696  return localRes;
14697  }
14698  }
14699  }
14700 
14701  // Process custom pools.
14702  {
14703  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14704  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14705  {
14706  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
14707  {
14708  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
14709  switch(localRes)
14710  {
14711  case VK_ERROR_FEATURE_NOT_PRESENT:
14712  break;
14713  case VK_SUCCESS:
14714  finalRes = VK_SUCCESS;
14715  break;
14716  default:
14717  return localRes;
14718  }
14719  }
14720  }
14721  }
14722 
14723  return finalRes;
14724 }
14725 
14726 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
14727 {
14728  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
14729  (*pAllocation)->InitLost();
14730 }
14731 
14732 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
14733 {
14734  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
14735 
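// Heaps with a user-defined size limit are guarded by a mutex: the remaining
// budget is checked and decremented here and given back in FreeVulkanMemory.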
14736  VkResult res;
14737  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14738  {
14739  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14740  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
14741  {
14742  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14743  if(res == VK_SUCCESS)
14744  {
14745  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
14746  }
14747  }
14748  else
14749  {
14750  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
14751  }
14752  }
14753  else
14754  {
14755  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14756  }
14757 
14758  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
14759  {
14760  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
14761  }
14762 
14763  return res;
14764 }
14765 
14766 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
14767 {
14768  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
14769  {
14770  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
14771  }
14772 
14773  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
14774 
14775  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
14776  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14777  {
14778  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14779  m_HeapSizeLimit[heapIndex] += size;
14780  }
14781 }
14782 
14783 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
14784 {
14785  if(hAllocation->CanBecomeLost())
14786  {
14787  return VK_ERROR_MEMORY_MAP_FAILED;
14788  }
14789 
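// Block allocations map the whole VkDeviceMemory block (reference-counted via
// pBlock->Map) and return a pointer offset to this allocation's suballocation.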
14790  switch(hAllocation->GetType())
14791  {
14792  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14793  {
14794  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14795  char *pBytes = VMA_NULL;
14796  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
14797  if(res == VK_SUCCESS)
14798  {
14799  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
14800  hAllocation->BlockAllocMap();
14801  }
14802  return res;
14803  }
14804  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14805  return hAllocation->DedicatedAllocMap(this, ppData);
14806  default:
14807  VMA_ASSERT(0);
14808  return VK_ERROR_MEMORY_MAP_FAILED;
14809  }
14810 }
14811 
14812 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
14813 {
14814  switch(hAllocation->GetType())
14815  {
14816  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14817  {
14818  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14819  hAllocation->BlockAllocUnmap();
14820  pBlock->Unmap(this, 1);
14821  }
14822  break;
14823  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14824  hAllocation->DedicatedAllocUnmap(this);
14825  break;
14826  default:
14827  VMA_ASSERT(0);
14828  }
14829 }
14830 
14831 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
14832 {
14833  VkResult res = VK_SUCCESS;
14834  switch(hAllocation->GetType())
14835  {
14836  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14837  res = GetVulkanFunctions().vkBindBufferMemory(
14838  m_hDevice,
14839  hBuffer,
14840  hAllocation->GetMemory(),
14841  0); //memoryOffset
14842  break;
14843  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14844  {
14845  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14846  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
14847  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
14848  break;
14849  }
14850  default:
14851  VMA_ASSERT(0);
14852  }
14853  return res;
14854 }
14855 
14856 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
14857 {
14858  VkResult res = VK_SUCCESS;
14859  switch(hAllocation->GetType())
14860  {
14861  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14862  res = GetVulkanFunctions().vkBindImageMemory(
14863  m_hDevice,
14864  hImage,
14865  hAllocation->GetMemory(),
14866  0); //memoryOffset
14867  break;
14868  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14869  {
14870  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14871  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
14872  res = pBlock->BindImageMemory(this, hAllocation, hImage);
14873  break;
14874  }
14875  default:
14876  VMA_ASSERT(0);
14877  }
14878  return res;
14879 }
14880 
14881 void VmaAllocator_T::FlushOrInvalidateAllocation(
14882  VmaAllocation hAllocation,
14883  VkDeviceSize offset, VkDeviceSize size,
14884  VMA_CACHE_OPERATION op)
14885 {
14886  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
14887  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
14888  {
14889  const VkDeviceSize allocationSize = hAllocation->GetSize();
14890  VMA_ASSERT(offset <= allocationSize);
14891 
14892  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
14893 
14894  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
14895  memRange.memory = hAllocation->GetMemory();
14896 
14897  switch(hAllocation->GetType())
14898  {
14899  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14900  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
14901  if(size == VK_WHOLE_SIZE)
14902  {
14903  memRange.size = allocationSize - memRange.offset;
14904  }
14905  else
14906  {
14907  VMA_ASSERT(offset + size <= allocationSize);
14908  memRange.size = VMA_MIN(
14909  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
14910  allocationSize - memRange.offset);
14911  }
14912  break;
14913 
14914  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14915  {
14916  // 1. Still within this allocation.
14917  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
14918  if(size == VK_WHOLE_SIZE)
14919  {
14920  size = allocationSize - offset;
14921  }
14922  else
14923  {
14924  VMA_ASSERT(offset + size <= allocationSize);
14925  }
14926  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
14927 
14928  // 2. Adjust to whole block.
14929  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
14930  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
14931  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
14932  memRange.offset += allocationOffset;
14933  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
14934 
14935  break;
14936  }
14937 
14938  default:
14939  VMA_ASSERT(0);
14940  }
14941 
14942  switch(op)
14943  {
14944  case VMA_CACHE_FLUSH:
14945  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
14946  break;
14947  case VMA_CACHE_INVALIDATE:
14948  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
14949  break;
14950  default:
14951  VMA_ASSERT(0);
14952  }
14953  }
14954  // else: Just ignore this call.
14955 }
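
// Worked example of the range rounding above, assuming nonCoherentAtomSize = 64
// and a block allocation placed at allocationOffset = 256: a request with
// offset = 70, size = 100 is aligned down to memRange.offset = 64, grown to
// VmaAlignUp(100 + (70 - 64), 64) = 128, then shifted by the allocation's
// offset within the block, giving the final range [320, 448), which satisfies
// the VkMappedMemoryRange alignment rules.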
14956 
14957 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
14958 {
14959  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
14960 
14961  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14962  {
14963  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14964  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14965  VMA_ASSERT(pDedicatedAllocations);
14966  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
14967  VMA_ASSERT(success);
14968  }
14969 
14970  VkDeviceMemory hMemory = allocation->GetMemory();
14971 
14972  /*
14973  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14974  before vkFreeMemory.
14975 
14976  if(allocation->GetMappedData() != VMA_NULL)
14977  {
14978  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14979  }
14980  */
14981 
14982  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
14983 
14984  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
14985 }
14986 
14987 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
14988 {
14989  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
14990  !hAllocation->CanBecomeLost() &&
14991  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
14992  {
14993  void* pData = VMA_NULL;
14994  VkResult res = Map(hAllocation, &pData);
14995  if(res == VK_SUCCESS)
14996  {
14997  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
14998  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
14999  Unmap(hAllocation);
15000  }
15001  else
15002  {
15003  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15004  }
15005  }
15006 }
15007 
15008 #if VMA_STATS_STRING_ENABLED
15009 
15010 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15011 {
15012  bool dedicatedAllocationsStarted = false;
15013  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15014  {
15015  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15016  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15017  VMA_ASSERT(pDedicatedAllocVector);
15018  if(pDedicatedAllocVector->empty() == false)
15019  {
15020  if(dedicatedAllocationsStarted == false)
15021  {
15022  dedicatedAllocationsStarted = true;
15023  json.WriteString("DedicatedAllocations");
15024  json.BeginObject();
15025  }
15026 
15027  json.BeginString("Type ");
15028  json.ContinueString(memTypeIndex);
15029  json.EndString();
15030 
15031  json.BeginArray();
15032 
15033  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15034  {
15035  json.BeginObject(true);
15036  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15037  hAlloc->PrintParameters(json);
15038  json.EndObject();
15039  }
15040 
15041  json.EndArray();
15042  }
15043  }
15044  if(dedicatedAllocationsStarted)
15045  {
15046  json.EndObject();
15047  }
15048 
15049  {
15050  bool allocationsStarted = false;
15051  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15052  {
15053  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15054  {
15055  if(allocationsStarted == false)
15056  {
15057  allocationsStarted = true;
15058  json.WriteString("DefaultPools");
15059  json.BeginObject();
15060  }
15061 
15062  json.BeginString("Type ");
15063  json.ContinueString(memTypeIndex);
15064  json.EndString();
15065 
15066  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15067  }
15068  }
15069  if(allocationsStarted)
15070  {
15071  json.EndObject();
15072  }
15073  }
15074 
15075  // Custom pools
15076  {
15077  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15078  const size_t poolCount = m_Pools.size();
15079  if(poolCount > 0)
15080  {
15081  json.WriteString("Pools");
15082  json.BeginObject();
15083  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15084  {
15085  json.BeginString();
15086  json.ContinueString(m_Pools[poolIndex]->GetId());
15087  json.EndString();
15088 
15089  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15090  }
15091  json.EndObject();
15092  }
15093  }
15094 }
15095 
15096 #endif // #if VMA_STATS_STRING_ENABLED
15097 
15098 //////////////////////////////////////////////////////////////////////////////
15099 // Public interface
15100 
15101 VkResult vmaCreateAllocator(
15102  const VmaAllocatorCreateInfo* pCreateInfo,
15103  VmaAllocator* pAllocator)
15104 {
15105  VMA_ASSERT(pCreateInfo && pAllocator);
15106  VMA_DEBUG_LOG("vmaCreateAllocator");
15107  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15108  return (*pAllocator)->Init(pCreateInfo);
15109 }
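
// Illustrative usage sketch: minimal allocator setup. Assumes `physicalDevice`
// and `device` are valid handles the application created beforehand.
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//
//   VmaAllocator allocator;
//   VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//   // ... use the allocator ...
//   vmaDestroyAllocator(allocator);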
15110 
15111 void vmaDestroyAllocator(
15112  VmaAllocator allocator)
15113 {
15114  if(allocator != VK_NULL_HANDLE)
15115  {
15116  VMA_DEBUG_LOG("vmaDestroyAllocator");
15117  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15118  vma_delete(&allocationCallbacks, allocator);
15119  }
15120 }
15121 
15122 void vmaGetPhysicalDeviceProperties(
15123  VmaAllocator allocator,
15124  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15125 {
15126  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15127  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15128 }
15129 
15130 void vmaGetMemoryProperties(
15131  VmaAllocator allocator,
15132  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15133 {
15134  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15135  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15136 }
15137 
15138 void vmaGetMemoryTypeProperties(
15139  VmaAllocator allocator,
15140  uint32_t memoryTypeIndex,
15141  VkMemoryPropertyFlags* pFlags)
15142 {
15143  VMA_ASSERT(allocator && pFlags);
15144  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15145  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15146 }
15147 
15148 void vmaSetCurrentFrameIndex(
15149  VmaAllocator allocator,
15150  uint32_t frameIndex)
15151 {
15152  VMA_ASSERT(allocator);
15153  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15154 
15155  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15156 
15157  allocator->SetCurrentFrameIndex(frameIndex);
15158 }
15159 
15160 void vmaCalculateStats(
15161  VmaAllocator allocator,
15162  VmaStats* pStats)
15163 {
15164  VMA_ASSERT(allocator && pStats);
15165  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15166  allocator->CalculateStats(pStats);
15167 }
15168 
15169 #if VMA_STATS_STRING_ENABLED
15170 
15171 void vmaBuildStatsString(
15172  VmaAllocator allocator,
15173  char** ppStatsString,
15174  VkBool32 detailedMap)
15175 {
15176  VMA_ASSERT(allocator && ppStatsString);
15177  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15178 
15179  VmaStringBuilder sb(allocator);
15180  {
15181  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15182  json.BeginObject();
15183 
15184  VmaStats stats;
15185  allocator->CalculateStats(&stats);
15186 
15187  json.WriteString("Total");
15188  VmaPrintStatInfo(json, stats.total);
15189 
15190  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15191  {
15192  json.BeginString("Heap ");
15193  json.ContinueString(heapIndex);
15194  json.EndString();
15195  json.BeginObject();
15196 
15197  json.WriteString("Size");
15198  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15199 
15200  json.WriteString("Flags");
15201  json.BeginArray(true);
15202  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15203  {
15204  json.WriteString("DEVICE_LOCAL");
15205  }
15206  json.EndArray();
15207 
15208  if(stats.memoryHeap[heapIndex].blockCount > 0)
15209  {
15210  json.WriteString("Stats");
15211  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15212  }
15213 
15214  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15215  {
15216  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15217  {
15218  json.BeginString("Type ");
15219  json.ContinueString(typeIndex);
15220  json.EndString();
15221 
15222  json.BeginObject();
15223 
15224  json.WriteString("Flags");
15225  json.BeginArray(true);
15226  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15227  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15228  {
15229  json.WriteString("DEVICE_LOCAL");
15230  }
15231  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15232  {
15233  json.WriteString("HOST_VISIBLE");
15234  }
15235  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15236  {
15237  json.WriteString("HOST_COHERENT");
15238  }
15239  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15240  {
15241  json.WriteString("HOST_CACHED");
15242  }
15243  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15244  {
15245  json.WriteString("LAZILY_ALLOCATED");
15246  }
15247  json.EndArray();
15248 
15249  if(stats.memoryType[typeIndex].blockCount > 0)
15250  {
15251  json.WriteString("Stats");
15252  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15253  }
15254 
15255  json.EndObject();
15256  }
15257  }
15258 
15259  json.EndObject();
15260  }
15261  if(detailedMap == VK_TRUE)
15262  {
15263  allocator->PrintDetailedMap(json);
15264  }
15265 
15266  json.EndObject();
15267  }
15268 
15269  const size_t len = sb.GetLength();
15270  char* const pChars = vma_new_array(allocator, char, len + 1);
15271  if(len > 0)
15272  {
15273  memcpy(pChars, sb.GetData(), len);
15274  }
15275  pChars[len] = '\0';
15276  *ppStatsString = pChars;
15277 }
15278 
15279 void vmaFreeStatsString(
15280  VmaAllocator allocator,
15281  char* pStatsString)
15282 {
15283  if(pStatsString != VMA_NULL)
15284  {
15285  VMA_ASSERT(allocator);
15286  size_t len = strlen(pStatsString);
15287  vma_delete_array(allocator, pStatsString, len + 1);
15288  }
15289 }
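
// Illustrative usage sketch: dumping allocator statistics as JSON. The string
// returned by vmaBuildStatsString() must be released with vmaFreeStatsString().
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailedMap
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);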
15290 
15291 #endif // #if VMA_STATS_STRING_ENABLED
15292 
15293 /*
15294 This function is not protected by any mutex because it just reads immutable data.
15295 */
15296 VkResult vmaFindMemoryTypeIndex(
15297  VmaAllocator allocator,
15298  uint32_t memoryTypeBits,
15299  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15300  uint32_t* pMemoryTypeIndex)
15301 {
15302  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15303  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15304  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15305 
15306  if(pAllocationCreateInfo->memoryTypeBits != 0)
15307  {
15308  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15309  }
15310 
15311  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15312  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15313 
15314  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15315  if(mapped)
15316  {
15317  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15318  }
15319 
15320  // Convert usage to requiredFlags and preferredFlags.
15321  switch(pAllocationCreateInfo->usage)
15322  {
15323  case VMA_MEMORY_USAGE_UNKNOWN:
15324  break;
15325  case VMA_MEMORY_USAGE_GPU_ONLY:
15326  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15327  {
15328  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15329  }
15330  break;
15331  case VMA_MEMORY_USAGE_CPU_ONLY:
15332  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15333  break;
15334  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15335  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15336  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15337  {
15338  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15339  }
15340  break;
15341  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15342  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15343  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15344  break;
15345  default:
15346  break;
15347  }
15348 
15349  *pMemoryTypeIndex = UINT32_MAX;
15350  uint32_t minCost = UINT32_MAX;
15351  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15352  memTypeIndex < allocator->GetMemoryTypeCount();
15353  ++memTypeIndex, memTypeBit <<= 1)
15354  {
15355  // This memory type is acceptable according to memoryTypeBits bitmask.
15356  if((memTypeBit & memoryTypeBits) != 0)
15357  {
15358  const VkMemoryPropertyFlags currFlags =
15359  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15360  // This memory type contains requiredFlags.
15361  if((requiredFlags & ~currFlags) == 0)
15362  {
15363  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15364  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15365  // Remember memory type with lowest cost.
15366  if(currCost < minCost)
15367  {
15368  *pMemoryTypeIndex = memTypeIndex;
15369  if(currCost == 0)
15370  {
15371  return VK_SUCCESS;
15372  }
15373  minCost = currCost;
15374  }
15375  }
15376  }
15377  }
15378  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15379 }
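
// Illustrative usage sketch: choosing a memory type for a host-visible staging
// buffer. Assumes `memReq` came from vkGetBufferMemoryRequirements().
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//
//   uint32_t memTypeIndex;
//   VkResult res = vmaFindMemoryTypeIndex(
//       allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
//   // VK_ERROR_FEATURE_NOT_PRESENT means no memory type satisfied requiredFlags.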
15380 
15381 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15382  VmaAllocator allocator,
15383  const VkBufferCreateInfo* pBufferCreateInfo,
15384  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15385  uint32_t* pMemoryTypeIndex)
15386 {
15387  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15388  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15389  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15390  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15391 
15392  const VkDevice hDev = allocator->m_hDevice;
15393  VkBuffer hBuffer = VK_NULL_HANDLE;
15394  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15395  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15396  if(res == VK_SUCCESS)
15397  {
15398  VkMemoryRequirements memReq = {};
15399  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15400  hDev, hBuffer, &memReq);
15401 
15402  res = vmaFindMemoryTypeIndex(
15403  allocator,
15404  memReq.memoryTypeBits,
15405  pAllocationCreateInfo,
15406  pMemoryTypeIndex);
15407 
15408  allocator->GetVulkanFunctions().vkDestroyBuffer(
15409  hDev, hBuffer, allocator->GetAllocationCallbacks());
15410  }
15411  return res;
15412 }
15413 
15414 VkResult vmaFindMemoryTypeIndexForImageInfo(
15415  VmaAllocator allocator,
15416  const VkImageCreateInfo* pImageCreateInfo,
15417  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15418  uint32_t* pMemoryTypeIndex)
15419 {
15420  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15421  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15422  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15423  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15424 
15425  const VkDevice hDev = allocator->m_hDevice;
15426  VkImage hImage = VK_NULL_HANDLE;
15427  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15428  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15429  if(res == VK_SUCCESS)
15430  {
15431  VkMemoryRequirements memReq = {};
15432  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15433  hDev, hImage, &memReq);
15434 
15435  res = vmaFindMemoryTypeIndex(
15436  allocator,
15437  memReq.memoryTypeBits,
15438  pAllocationCreateInfo,
15439  pMemoryTypeIndex);
15440 
15441  allocator->GetVulkanFunctions().vkDestroyImage(
15442  hDev, hImage, allocator->GetAllocationCallbacks());
15443  }
15444  return res;
15445 }
15446 
15447 VkResult vmaCreatePool(
15448  VmaAllocator allocator,
15449  const VmaPoolCreateInfo* pCreateInfo,
15450  VmaPool* pPool)
15451 {
15452  VMA_ASSERT(allocator && pCreateInfo && pPool);
15453 
15454  VMA_DEBUG_LOG("vmaCreatePool");
15455 
15456  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15457 
15458  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15459 
15460 #if VMA_RECORDING_ENABLED
15461  if(allocator->GetRecorder() != VMA_NULL)
15462  {
15463  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15464  }
15465 #endif
15466 
15467  return res;
15468 }
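
// Illustrative usage sketch: creating a custom pool. Assumes `memTypeIndex`
// was obtained from one of the vmaFindMemoryTypeIndex* helpers; the sizes are
// arbitrary example values.
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.blockSize = 64ull * 1024 * 1024; // optional: 64 MiB blocks
//   poolCreateInfo.maxBlockCount = 4;               // optional upper limit
//
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//   // ... allocate with VmaAllocationCreateInfo::pool = pool ...
//   vmaDestroyPool(allocator, pool);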
15469 
15470 void vmaDestroyPool(
15471  VmaAllocator allocator,
15472  VmaPool pool)
15473 {
15474  VMA_ASSERT(allocator);
15475 
15476  if(pool == VK_NULL_HANDLE)
15477  {
15478  return;
15479  }
15480 
15481  VMA_DEBUG_LOG("vmaDestroyPool");
15482 
15483  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15484 
15485 #if VMA_RECORDING_ENABLED
15486  if(allocator->GetRecorder() != VMA_NULL)
15487  {
15488  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15489  }
15490 #endif
15491 
15492  allocator->DestroyPool(pool);
15493 }
15494 
15495 void vmaGetPoolStats(
15496  VmaAllocator allocator,
15497  VmaPool pool,
15498  VmaPoolStats* pPoolStats)
15499 {
15500  VMA_ASSERT(allocator && pool && pPoolStats);
15501 
15502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15503 
15504  allocator->GetPoolStats(pool, pPoolStats);
15505 }
15506 
15507 void vmaMakePoolAllocationsLost(
15508  VmaAllocator allocator,
15509  VmaPool pool,
15510  size_t* pLostAllocationCount)
15511 {
15512  VMA_ASSERT(allocator && pool);
15513 
15514  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15515 
15516 #if VMA_RECORDING_ENABLED
15517  if(allocator->GetRecorder() != VMA_NULL)
15518  {
15519  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15520  }
15521 #endif
15522 
15523  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15524 }
15525 
15526 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15527 {
15528  VMA_ASSERT(allocator && pool);
15529 
15530  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15531 
15532  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15533 
15534  return allocator->CheckPoolCorruption(pool);
15535 }
15536 
15537 VkResult vmaAllocateMemory(
15538  VmaAllocator allocator,
15539  const VkMemoryRequirements* pVkMemoryRequirements,
15540  const VmaAllocationCreateInfo* pCreateInfo,
15541  VmaAllocation* pAllocation,
15542  VmaAllocationInfo* pAllocationInfo)
15543 {
15544  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15545 
15546  VMA_DEBUG_LOG("vmaAllocateMemory");
15547 
15548  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15549 
15550  VkResult result = allocator->AllocateMemory(
15551  *pVkMemoryRequirements,
15552  false, // requiresDedicatedAllocation
15553  false, // prefersDedicatedAllocation
15554  VK_NULL_HANDLE, // dedicatedBuffer
15555  VK_NULL_HANDLE, // dedicatedImage
15556  *pCreateInfo,
15557  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15558  pAllocation);
15559 
15560 #if VMA_RECORDING_ENABLED
15561  if(allocator->GetRecorder() != VMA_NULL)
15562  {
15563  allocator->GetRecorder()->RecordAllocateMemory(
15564  allocator->GetCurrentFrameIndex(),
15565  *pVkMemoryRequirements,
15566  *pCreateInfo,
15567  *pAllocation);
15568  }
15569 #endif
15570 
15571  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15572  {
15573  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15574  }
15575 
15576  return result;
15577 }
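
// Illustrative usage sketch: allocating raw memory for a resource created
// outside of VMA. Assumes `memReq` came from vkGetBufferMemoryRequirements();
// vmaCreateBuffer()/vmaCreateImage() are usually the preferable one-call paths.
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VmaAllocation alloc;
//   VmaAllocationInfo allocInfo;
//   VkResult res = vmaAllocateMemory(
//       allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);
//   // allocInfo.deviceMemory and allocInfo.offset identify the memory range.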
15578 
15579 VkResult vmaAllocateMemoryForBuffer(
15580  VmaAllocator allocator,
15581  VkBuffer buffer,
15582  const VmaAllocationCreateInfo* pCreateInfo,
15583  VmaAllocation* pAllocation,
15584  VmaAllocationInfo* pAllocationInfo)
15585 {
15586  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15587 
15588  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
15589 
15590  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15591 
15592  VkMemoryRequirements vkMemReq = {};
15593  bool requiresDedicatedAllocation = false;
15594  bool prefersDedicatedAllocation = false;
15595  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
15596  requiresDedicatedAllocation,
15597  prefersDedicatedAllocation);
15598 
15599  VkResult result = allocator->AllocateMemory(
15600  vkMemReq,
15601  requiresDedicatedAllocation,
15602  prefersDedicatedAllocation,
15603  buffer, // dedicatedBuffer
15604  VK_NULL_HANDLE, // dedicatedImage
15605  *pCreateInfo,
15606  VMA_SUBALLOCATION_TYPE_BUFFER,
15607  pAllocation);
15608 
15609 #if VMA_RECORDING_ENABLED
15610  if(allocator->GetRecorder() != VMA_NULL)
15611  {
15612  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
15613  allocator->GetCurrentFrameIndex(),
15614  vkMemReq,
15615  requiresDedicatedAllocation,
15616  prefersDedicatedAllocation,
15617  *pCreateInfo,
15618  *pAllocation);
15619  }
15620 #endif
15621 
15622  if(pAllocationInfo && result == VK_SUCCESS)
15623  {
15624  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15625  }
15626 
15627  return result;
15628 }
15629 
15630 VkResult vmaAllocateMemoryForImage(
15631  VmaAllocator allocator,
15632  VkImage image,
15633  const VmaAllocationCreateInfo* pCreateInfo,
15634  VmaAllocation* pAllocation,
15635  VmaAllocationInfo* pAllocationInfo)
15636 {
15637  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15638 
15639  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
15640 
15641  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15642 
15643  VkMemoryRequirements vkMemReq = {};
15644  bool requiresDedicatedAllocation = false;
15645  bool prefersDedicatedAllocation = false;
15646  allocator->GetImageMemoryRequirements(image, vkMemReq,
15647  requiresDedicatedAllocation, prefersDedicatedAllocation);
15648 
15649  VkResult result = allocator->AllocateMemory(
15650  vkMemReq,
15651  requiresDedicatedAllocation,
15652  prefersDedicatedAllocation,
15653  VK_NULL_HANDLE, // dedicatedBuffer
15654  image, // dedicatedImage
15655  *pCreateInfo,
15656  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
15657  pAllocation);
15658 
15659 #if VMA_RECORDING_ENABLED
15660  if(allocator->GetRecorder() != VMA_NULL)
15661  {
15662  allocator->GetRecorder()->RecordAllocateMemoryForImage(
15663  allocator->GetCurrentFrameIndex(),
15664  vkMemReq,
15665  requiresDedicatedAllocation,
15666  prefersDedicatedAllocation,
15667  *pCreateInfo,
15668  *pAllocation);
15669  }
15670 #endif
15671 
15672  if(pAllocationInfo && result == VK_SUCCESS)
15673  {
15674  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15675  }
15676 
15677  return result;
15678 }
15679 
15680 void vmaFreeMemory(
15681  VmaAllocator allocator,
15682  VmaAllocation allocation)
15683 {
15684  VMA_ASSERT(allocator);
15685 
15686  if(allocation == VK_NULL_HANDLE)
15687  {
15688  return;
15689  }
15690 
15691  VMA_DEBUG_LOG("vmaFreeMemory");
15692 
15693  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15694 
15695 #if VMA_RECORDING_ENABLED
15696  if(allocator->GetRecorder() != VMA_NULL)
15697  {
15698  allocator->GetRecorder()->RecordFreeMemory(
15699  allocator->GetCurrentFrameIndex(),
15700  allocation);
15701  }
15702 #endif
15703 
15704  allocator->FreeMemory(allocation);
15705 }
15706 
15707 VkResult vmaResizeAllocation(
15708  VmaAllocator allocator,
15709  VmaAllocation allocation,
15710  VkDeviceSize newSize)
15711 {
15712  VMA_ASSERT(allocator && allocation);
15713 
15714  VMA_DEBUG_LOG("vmaResizeAllocation");
15715 
15716  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15717 
15718 #if VMA_RECORDING_ENABLED
15719  if(allocator->GetRecorder() != VMA_NULL)
15720  {
15721  allocator->GetRecorder()->RecordResizeAllocation(
15722  allocator->GetCurrentFrameIndex(),
15723  allocation,
15724  newSize);
15725  }
15726 #endif
15727 
15728  return allocator->ResizeAllocation(allocation, newSize);
15729 }
15730 
15731 void vmaGetAllocationInfo(
15732  VmaAllocator allocator,
15733  VmaAllocation allocation,
15734  VmaAllocationInfo* pAllocationInfo)
15735 {
15736  VMA_ASSERT(allocator && allocation && pAllocationInfo);
15737 
15738  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15739 
15740 #if VMA_RECORDING_ENABLED
15741  if(allocator->GetRecorder() != VMA_NULL)
15742  {
15743  allocator->GetRecorder()->RecordGetAllocationInfo(
15744  allocator->GetCurrentFrameIndex(),
15745  allocation);
15746  }
15747 #endif
15748 
15749  allocator->GetAllocationInfo(allocation, pAllocationInfo);
15750 }
15751 
15752 VkBool32 vmaTouchAllocation(
15753  VmaAllocator allocator,
15754  VmaAllocation allocation)
15755 {
15756  VMA_ASSERT(allocator && allocation);
15757 
15758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15759 
15760 #if VMA_RECORDING_ENABLED
15761  if(allocator->GetRecorder() != VMA_NULL)
15762  {
15763  allocator->GetRecorder()->RecordTouchAllocation(
15764  allocator->GetCurrentFrameIndex(),
15765  allocation);
15766  }
15767 #endif
15768 
15769  return allocator->TouchAllocation(allocation);
15770 }
15771 
15772 void vmaSetAllocationUserData(
15773  VmaAllocator allocator,
15774  VmaAllocation allocation,
15775  void* pUserData)
15776 {
15777  VMA_ASSERT(allocator && allocation);
15778 
15779  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15780 
15781  allocation->SetUserData(allocator, pUserData);
15782 
15783 #if VMA_RECORDING_ENABLED
15784  if(allocator->GetRecorder() != VMA_NULL)
15785  {
15786  allocator->GetRecorder()->RecordSetAllocationUserData(
15787  allocator->GetCurrentFrameIndex(),
15788  allocation,
15789  pUserData);
15790  }
15791 #endif
15792 }
15793 
15794 void vmaCreateLostAllocation(
15795  VmaAllocator allocator,
15796  VmaAllocation* pAllocation)
15797 {
15798  VMA_ASSERT(allocator && pAllocation);
15799 
15800  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15801 
15802  allocator->CreateLostAllocation(pAllocation);
15803 
15804 #if VMA_RECORDING_ENABLED
15805  if(allocator->GetRecorder() != VMA_NULL)
15806  {
15807  allocator->GetRecorder()->RecordCreateLostAllocation(
15808  allocator->GetCurrentFrameIndex(),
15809  *pAllocation);
15810  }
15811 #endif
15812 }
15813 
15814 VkResult vmaMapMemory(
15815  VmaAllocator allocator,
15816  VmaAllocation allocation,
15817  void** ppData)
15818 {
15819  VMA_ASSERT(allocator && allocation && ppData);
15820 
15821  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15822 
15823  VkResult res = allocator->Map(allocation, ppData);
15824 
15825 #if VMA_RECORDING_ENABLED
15826  if(allocator->GetRecorder() != VMA_NULL)
15827  {
15828  allocator->GetRecorder()->RecordMapMemory(
15829  allocator->GetCurrentFrameIndex(),
15830  allocation);
15831  }
15832 #endif
15833 
15834  return res;
15835 }
15836 
15837 void vmaUnmapMemory(
15838  VmaAllocator allocator,
15839  VmaAllocation allocation)
15840 {
15841  VMA_ASSERT(allocator && allocation);
15842 
15843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15844 
15845 #if VMA_RECORDING_ENABLED
15846  if(allocator->GetRecorder() != VMA_NULL)
15847  {
15848  allocator->GetRecorder()->RecordUnmapMemory(
15849  allocator->GetCurrentFrameIndex(),
15850  allocation);
15851  }
15852 #endif
15853 
15854  allocator->Unmap(allocation);
15855 }
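
// Illustrative usage sketch: a map/write/flush/unmap round trip on a
// host-visible allocation. As FlushOrInvalidateAllocation() above shows, the
// flush is ignored for HOST_COHERENT memory types but required for
// non-coherent ones. `srcData` and `srcSize` are hypothetical.
//
//   void* mapped = VMA_NULL;
//   if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
//   {
//       memcpy(mapped, srcData, (size_t)srcSize);
//       vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
//       vmaUnmapMemory(allocator, alloc);
//   }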
15856 
15857 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15858 {
15859  VMA_ASSERT(allocator && allocation);
15860 
15861  VMA_DEBUG_LOG("vmaFlushAllocation");
15862 
15863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15864 
15865  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
15866 
15867 #if VMA_RECORDING_ENABLED
15868  if(allocator->GetRecorder() != VMA_NULL)
15869  {
15870  allocator->GetRecorder()->RecordFlushAllocation(
15871  allocator->GetCurrentFrameIndex(),
15872  allocation, offset, size);
15873  }
15874 #endif
15875 }
15876 
15877 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15878 {
15879  VMA_ASSERT(allocator && allocation);
15880 
15881  VMA_DEBUG_LOG("vmaInvalidateAllocation");
15882 
15883  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15884 
15885  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
15886 
15887 #if VMA_RECORDING_ENABLED
15888  if(allocator->GetRecorder() != VMA_NULL)
15889  {
15890  allocator->GetRecorder()->RecordInvalidateAllocation(
15891  allocator->GetCurrentFrameIndex(),
15892  allocation, offset, size);
15893  }
15894 #endif
15895 }
15896 
15897 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
15898 {
15899  VMA_ASSERT(allocator);
15900 
15901  VMA_DEBUG_LOG("vmaCheckCorruption");
15902 
15903  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15904 
15905  return allocator->CheckCorruption(memoryTypeBits);
15906 }
15907 
15908 VkResult vmaDefragment(
15909  VmaAllocator allocator,
15910  VmaAllocation* pAllocations,
15911  size_t allocationCount,
15912  VkBool32* pAllocationsChanged,
15913  const VmaDefragmentationInfo *pDefragmentationInfo,
15914  VmaDefragmentationStats* pDefragmentationStats)
15915 {
15916  // Deprecated interface, reimplemented using the new one.
15917 
15918  VmaDefragmentationInfo2 info2 = {};
15919  info2.allocationCount = (uint32_t)allocationCount;
15920  info2.pAllocations = pAllocations;
15921  info2.pAllocationsChanged = pAllocationsChanged;
15922  if(pDefragmentationInfo != VMA_NULL)
15923  {
15924  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
15925  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
15926  }
15927  else
15928  {
15929  info2.maxCpuAllocationsToMove = UINT32_MAX;
15930  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
15931  }
15932  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
15933 
15934  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
15935  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
15936  if(res == VK_NOT_READY)
15937  {
15938  res = vmaDefragmentationEnd(allocator, ctx);
15939  }
15940  return res;
15941 }
15942 
15943 VkResult vmaDefragmentationBegin(
15944  VmaAllocator allocator,
15945  const VmaDefragmentationInfo2* pInfo,
15946  VmaDefragmentationStats* pStats,
15947  VmaDefragmentationContext *pContext)
15948 {
15949  VMA_ASSERT(allocator && pInfo && pContext);
15950  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
15951  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
15952 
15953  VMA_DEBUG_LOG("vmaDefragmentationBegin");
15954 
15955  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15956 
15957  return allocator->DefragmentationBegin(*pInfo, pStats, pContext);
15958 }
15959 
15960 VkResult vmaDefragmentationEnd(
15961  VmaAllocator allocator,
15962  VmaDefragmentationContext context)
15963 {
15964  VMA_ASSERT(allocator);
15965 
15966  VMA_DEBUG_LOG("vmaDefragmentationEnd");
15967 
15968  if(context != VK_NULL_HANDLE)
15969  {
15970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15971 
15972  return allocator->DefragmentationEnd(context);
15973  }
15974  else
15975  {
15976  return VK_SUCCESS;
15977  }
15978 }
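
// Illustrative usage sketch of the begin/end pair that replaces the deprecated
// vmaDefragment(): CPU-side defragmentation of a set of allocations. Assumes
// `allocs`/`allocCount` describe allocations that are not mapped or in use.
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.allocationCount = allocCount;
//   defragInfo.pAllocations = allocs;
//   defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//   if(res == VK_NOT_READY)
//   {
//       res = vmaDefragmentationEnd(allocator, defragCtx);
//   }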
15979 
15980 VkResult vmaBindBufferMemory(
15981  VmaAllocator allocator,
15982  VmaAllocation allocation,
15983  VkBuffer buffer)
15984 {
15985  VMA_ASSERT(allocator && allocation && buffer);
15986 
15987  VMA_DEBUG_LOG("vmaBindBufferMemory");
15988 
15989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15990 
15991  return allocator->BindBufferMemory(allocation, buffer);
15992 }
15993 
15994 VkResult vmaBindImageMemory(
15995  VmaAllocator allocator,
15996  VmaAllocation allocation,
15997  VkImage image)
15998 {
15999  VMA_ASSERT(allocator && allocation && image);
16000 
16001  VMA_DEBUG_LOG("vmaBindImageMemory");
16002 
16003  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16004 
16005  return allocator->BindImageMemory(allocation, image);
16006 }
16007 
16008 VkResult vmaCreateBuffer(
16009  VmaAllocator allocator,
16010  const VkBufferCreateInfo* pBufferCreateInfo,
16011  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16012  VkBuffer* pBuffer,
16013  VmaAllocation* pAllocation,
16014  VmaAllocationInfo* pAllocationInfo)
16015 {
16016  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16017 
16018  if(pBufferCreateInfo->size == 0)
16019  {
16020  return VK_ERROR_VALIDATION_FAILED_EXT;
16021  }
16022 
16023  VMA_DEBUG_LOG("vmaCreateBuffer");
16024 
16025  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16026 
16027  *pBuffer = VK_NULL_HANDLE;
16028  *pAllocation = VK_NULL_HANDLE;
16029 
16030  // 1. Create VkBuffer.
16031  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16032  allocator->m_hDevice,
16033  pBufferCreateInfo,
16034  allocator->GetAllocationCallbacks(),
16035  pBuffer);
16036  if(res >= 0)
16037  {
16038  // 2. vkGetBufferMemoryRequirements.
16039  VkMemoryRequirements vkMemReq = {};
16040  bool requiresDedicatedAllocation = false;
16041  bool prefersDedicatedAllocation = false;
16042  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16043  requiresDedicatedAllocation, prefersDedicatedAllocation);
16044 
16045  // Make sure the alignment requirements for specific buffer usages reported
16046  // in Physical Device Properties are covered by the alignment reported by the memory requirements.
16047  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16048  {
16049  VMA_ASSERT(vkMemReq.alignment %
16050  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16051  }
16052  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16053  {
16054  VMA_ASSERT(vkMemReq.alignment %
16055  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16056  }
16057  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16058  {
16059  VMA_ASSERT(vkMemReq.alignment %
16060  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16061  }
16062 
16063  // 3. Allocate memory using allocator.
16064  res = allocator->AllocateMemory(
16065  vkMemReq,
16066  requiresDedicatedAllocation,
16067  prefersDedicatedAllocation,
16068  *pBuffer, // dedicatedBuffer
16069  VK_NULL_HANDLE, // dedicatedImage
16070  *pAllocationCreateInfo,
16071  VMA_SUBALLOCATION_TYPE_BUFFER,
16072  pAllocation);
16073 
16074 #if VMA_RECORDING_ENABLED
16075  if(allocator->GetRecorder() != VMA_NULL)
16076  {
16077  allocator->GetRecorder()->RecordCreateBuffer(
16078  allocator->GetCurrentFrameIndex(),
16079  *pBufferCreateInfo,
16080  *pAllocationCreateInfo,
16081  *pAllocation);
16082  }
16083 #endif
16084 
16085  if(res >= 0)
16086  {
16087  // 4. Bind buffer with memory.
16088  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16089  if(res >= 0)
16090  {
16091  // All steps succeeded.
16092  #if VMA_STATS_STRING_ENABLED
16093  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16094  #endif
16095  if(pAllocationInfo != VMA_NULL)
16096  {
16097  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16098  }
16099 
16100  return VK_SUCCESS;
16101  }
16102  allocator->FreeMemory(*pAllocation);
16103  *pAllocation = VK_NULL_HANDLE;
16104  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16105  *pBuffer = VK_NULL_HANDLE;
16106  return res;
16107  }
16108  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16109  *pBuffer = VK_NULL_HANDLE;
16110  return res;
16111  }
16112  return res;
16113 }
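
// Illustrative usage sketch of the one-call path implemented above: buffer
// creation, memory allocation, and binding in a single step.
//
//   VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufCreateInfo.size = 65536;
//   bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkBuffer buf;
//   VmaAllocation alloc;
//   VkResult res = vmaCreateBuffer(
//       allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
//   // ... later:
//   vmaDestroyBuffer(allocator, buf, alloc);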
16114 
16115 void vmaDestroyBuffer(
16116  VmaAllocator allocator,
16117  VkBuffer buffer,
16118  VmaAllocation allocation)
16119 {
16120  VMA_ASSERT(allocator);
16121 
16122  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16123  {
16124  return;
16125  }
16126 
16127  VMA_DEBUG_LOG("vmaDestroyBuffer");
16128 
16129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16130 
16131 #if VMA_RECORDING_ENABLED
16132  if(allocator->GetRecorder() != VMA_NULL)
16133  {
16134  allocator->GetRecorder()->RecordDestroyBuffer(
16135  allocator->GetCurrentFrameIndex(),
16136  allocation);
16137  }
16138 #endif
16139 
16140  if(buffer != VK_NULL_HANDLE)
16141  {
16142  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16143  }
16144 
16145  if(allocation != VK_NULL_HANDLE)
16146  {
16147  allocator->FreeMemory(allocation);
16148  }
16149 }
16150 
16151 VkResult vmaCreateImage(
16152  VmaAllocator allocator,
16153  const VkImageCreateInfo* pImageCreateInfo,
16154  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16155  VkImage* pImage,
16156  VmaAllocation* pAllocation,
16157  VmaAllocationInfo* pAllocationInfo)
16158 {
16159  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16160 
16161  if(pImageCreateInfo->extent.width == 0 ||
16162  pImageCreateInfo->extent.height == 0 ||
16163  pImageCreateInfo->extent.depth == 0 ||
16164  pImageCreateInfo->mipLevels == 0 ||
16165  pImageCreateInfo->arrayLayers == 0)
16166  {
16167  return VK_ERROR_VALIDATION_FAILED_EXT;
16168  }
16169 
16170  VMA_DEBUG_LOG("vmaCreateImage");
16171 
16172  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16173 
16174  *pImage = VK_NULL_HANDLE;
16175  *pAllocation = VK_NULL_HANDLE;
16176 
16177  // 1. Create VkImage.
16178  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16179  allocator->m_hDevice,
16180  pImageCreateInfo,
16181  allocator->GetAllocationCallbacks(),
16182  pImage);
16183  if(res >= 0)
16184  {
16185  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16186  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16187  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16188 
16189  // 2. Allocate memory using allocator.
16190  VkMemoryRequirements vkMemReq = {};
16191  bool requiresDedicatedAllocation = false;
16192  bool prefersDedicatedAllocation = false;
16193  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16194  requiresDedicatedAllocation, prefersDedicatedAllocation);
16195 
16196  res = allocator->AllocateMemory(
16197  vkMemReq,
16198  requiresDedicatedAllocation,
16199  prefersDedicatedAllocation,
16200  VK_NULL_HANDLE, // dedicatedBuffer
16201  *pImage, // dedicatedImage
16202  *pAllocationCreateInfo,
16203  suballocType,
16204  pAllocation);
16205 
16206 #if VMA_RECORDING_ENABLED
16207  if(allocator->GetRecorder() != VMA_NULL)
16208  {
16209  allocator->GetRecorder()->RecordCreateImage(
16210  allocator->GetCurrentFrameIndex(),
16211  *pImageCreateInfo,
16212  *pAllocationCreateInfo,
16213  *pAllocation);
16214  }
16215 #endif
16216 
16217  if(res >= 0)
16218  {
16219  // 3. Bind image with memory.
16220  res = allocator->BindImageMemory(*pAllocation, *pImage);
16221  if(res >= 0)
16222  {
16223  // All steps succeeded.
16224  #if VMA_STATS_STRING_ENABLED
16225  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16226  #endif
16227  if(pAllocationInfo != VMA_NULL)
16228  {
16229  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16230  }
16231 
16232  return VK_SUCCESS;
16233  }
16234  allocator->FreeMemory(*pAllocation);
16235  *pAllocation = VK_NULL_HANDLE;
16236  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16237  *pImage = VK_NULL_HANDLE;
16238  return res;
16239  }
16240  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16241  *pImage = VK_NULL_HANDLE;
16242  return res;
16243  }
16244  return res;
16245 }
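
// Illustrative usage sketch, analogous to vmaCreateBuffer(): a sampled 2D
// image in device-local memory. All values are arbitrary examples.
//
//   VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//   imgCreateInfo.extent = { 1024, 1024, 1 };
//   imgCreateInfo.mipLevels = 1;
//   imgCreateInfo.arrayLayers = 1;
//   imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkImage image;
//   VmaAllocation alloc;
//   VkResult res = vmaCreateImage(
//       allocator, &imgCreateInfo, &allocCreateInfo, &image, &alloc, VMA_NULL);
//   vmaDestroyImage(allocator, image, alloc);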
16246 
16247 void vmaDestroyImage(
16248  VmaAllocator allocator,
16249  VkImage image,
16250  VmaAllocation allocation)
16251 {
16252  VMA_ASSERT(allocator);
16253 
16254  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16255  {
16256  return;
16257  }
16258 
16259  VMA_DEBUG_LOG("vmaDestroyImage");
16260 
16261  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16262 
16263 #if VMA_RECORDING_ENABLED
16264  if(allocator->GetRecorder() != VMA_NULL)
16265  {
16266  allocator->GetRecorder()->RecordDestroyImage(
16267  allocator->GetCurrentFrameIndex(),
16268  allocation);
16269  }
16270 #endif
16271 
16272  if(image != VK_NULL_HANDLE)
16273  {
16274  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16275  }
16276  if(allocation != VK_NULL_HANDLE)
16277  {
16278  allocator->FreeMemory(allocation);
16279  }
16280 }
16281 
16282 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1707
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2010
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1765
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side...
Definition: vk_mem_alloc.h:2762
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1739
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2335
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1719
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1967
Definition: vk_mem_alloc.h:2070
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:2715
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1711
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2435
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1762
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2798
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2224
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1606
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2316
Definition: vk_mem_alloc.h:2047
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:2718
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1700
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2123
Definition: vk_mem_alloc.h:1994
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1774
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2252
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1828
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1759
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1998
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1900
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1716
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:2752
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1899
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2802
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1791
VmaStatInfo total
Definition: vk_mem_alloc.h:1909
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2810
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2107
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2793
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1717
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1642
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1768
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2266
Definition: vk_mem_alloc.h:2260
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1723
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1835
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2445
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1712
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1737
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2144
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2286
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2322
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1698
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2269
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2767
VmaMemoryUsage
Definition: vk_mem_alloc.h:1945
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2727
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2788
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2806
Definition: vk_mem_alloc.h:1984
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2131
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1715
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1905
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1648
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2706
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2704
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2733
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1669
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1741
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1674
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2808
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2118
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2332
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1708
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1888
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2281
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1661
Definition: vk_mem_alloc.h:2256
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2054
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1901
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1665
Definition: vk_mem_alloc.h:2081
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2272
Definition: vk_mem_alloc.h:1993
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1714
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2113
Definition: vk_mem_alloc.h:2104
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1891
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1710
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2294
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1777
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2325
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2102
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2757
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2137
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1816
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1907
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:2034
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1900
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
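A minimal sketch; the buffer parameters are arbitrary, and the resulting index is typically fed into VmaPoolCreateInfo::memoryTypeIndex:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);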
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1721
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1747
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:2703
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2781
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1663
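A sketch of wiring these informative callbacks; MyAllocateCallback is a hypothetical application function matching PFN_vmaAllocateDeviceMemoryFunction:

    static void VKAPI_PTR MyAllocateCallback(VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size)
    {
        // Informative only - called after vkAllocateMemory succeeds.
    }

    VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
    deviceMemoryCallbacks.pfnAllocate = MyAllocateCallback;
    deviceMemoryCallbacks.pfnFree = NULL; // both members are optional
    // Then: allocatorCreateInfo.pDeviceMemoryCallbacks = &deviceMemoryCallbacks;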
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1720
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps the memory represented by the given allocation and returns a pointer to it.
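A sketch of a map/copy/unmap round trip; myData and myDataSize are placeholders for application data, and memcpy() requires <string.h>:

    void* mappedData;
    if (vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, myData, myDataSize);
        vmaUnmapMemory(allocator, allocation); // every map must be paired with an unmap
    }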
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2308
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1713
Definition: vk_mem_alloc.h:2065
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1755
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:2459
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB. Optional.
Definition: vk_mem_alloc.h:1771
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1900
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1897
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
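For illustration, a hedged sketch; the check is only meaningful when margins and corruption detection are compiled in (VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION defined to nonzero):

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // UINT32_MAX: check all memory types
    if (res == VK_ERROR_VALIDATION_FAILED_EXT)
    {
        // A corrupted margin was found around at least one allocation.
    }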
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:2313
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2712
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
Definition: vk_mem_alloc.h:2074
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2440
Definition: vk_mem_alloc.h:2088
Definition: vk_mem_alloc.h:2100
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2804
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1706
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
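A minimal sketch, assuming physicalDevice and device are the application's existing Vulkan handles:

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    allocatorCreateInfo.physicalDevice = physicalDevice;
    allocatorCreateInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
    // ... at shutdown, after all allocations are freed:
    vmaDestroyAllocator(allocator);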
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1895
Definition: vk_mem_alloc.h:1950
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2262
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
Definition: vk_mem_alloc.h:1744
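A sketch of enabling recording; the file path is a hypothetical choice, and recording only functions when support is compiled in via VMA_RECORDING_ENABLED:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush the file after every call
    recordSettings.pFilePath = "vma_recording.csv";
    allocatorCreateInfo.pRecordSettings = &recordSettings;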
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1893
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1718
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1722
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:2021
Definition: vk_mem_alloc.h:2095
Definition: vk_mem_alloc.h:1977
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2454
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1696
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1709
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2241
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
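A hedged sketch; newSize is a placeholder value, and on failure the allocation keeps its previous size:

    VkResult res = vmaResizeAllocation(allocator, allocation, newSize);
    if (res != VK_SUCCESS)
    {
        // Not enough free space directly after the allocation - allocate anew and copy instead.
    }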
Parameters of a VmaAllocation object, which can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2421
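A minimal retrieval sketch, assuming an existing allocation:

    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    // allocInfo.deviceMemory and allocInfo.offset locate the allocation in its block;
    // allocInfo.pMappedData is non-null if the allocation is persistently mapped.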
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
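A sketch of the separate allocate-and-bind path for a buffer created directly with vkCreateBuffer (buffer and device are assumed application handles):

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, NULL);
    vmaBindBufferMemory(allocator, allocation, buffer); // binds at the allocation's memory and offset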
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2085
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2206
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1901
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if the allocation is not lost, and atomically marks it as used in the current frame.
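A sketch of the per-frame pattern that combines vmaSetCurrentFrameIndex() with this function; frameIndex is a counter maintained by the application:

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if (vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // The allocation became lost - destroy and recreate the resource.
    }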
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1731
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1908
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2319
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1901
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to commandBuffer.
Definition: vk_mem_alloc.h:2772
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2426
uint32_t poolCount
Number of pools in the pPools array.
Definition: vk_mem_alloc.h:2736
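Finally, a hedged sketch of CPU-side defragmentation with VmaDefragmentationInfo2; allocations and allocationCount are assumed to be owned by the application, and leaving commandBuffer as VK_NULL_HANDLE restricts the process to CPU-side moves:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no allocation-count limit

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx); // pStats is optional
    vmaDefragmentationEnd(allocator, defragCtx);

Buffers and images bound to allocations that were moved must be recreated and rebound afterwards.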